author	Luca Boccassi <luca.boccassi@gmail.com>	2017-08-16 18:42:05 +0100
committer	Luca Boccassi <luca.boccassi@gmail.com>	2017-08-16 18:46:04 +0100
commit	f239aed5e674965691846e8ce3f187dd47523689
tree	a153a3125c6e183c73871a8ecaa4b285fed5fbd5 /drivers/net/bnxt
parent	bf7567fd2a5b0b28ab724046143c24561d38d015
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/bnxt')
25 files changed, 10268 insertions, 1068 deletions
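Several of the new code paths below gate behaviour on the running firmware version: bnxt_hwrm_ver_get() now packs the HWRM firmware major, minor, build and reserved bytes into a single 32-bit bp->fw_ver (high byte to low byte), so version gates reduce to plain integer comparisons. A minimal standalone sketch of that scheme; the pack_fw_ver() helper is illustrative and not part of the patch, only the bit layout and the sample thresholds come from the diff:

/* Illustrative only: bit layout mirrors bnxt_hwrm_ver_get() below. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_fw_ver(uint8_t maj, uint8_t min, uint8_t bld, uint8_t rsvd)
{
	return ((uint32_t)maj << 24) | ((uint32_t)min << 16) |
	       ((uint32_t)bld << 8) | rsvd;
}

int main(void)
{
	uint32_t fw_ver = pack_fw_ver(20, 6, 100, 0);

	/* Same shape as the VF-mailbox gate in bnxt_dev_init():
	 * allow [20.6.100, 20.7.0) or anything >= 20.8.0. */
	if ((fw_ver >= pack_fw_ver(20, 6, 100, 0) &&
	     fw_ver < pack_fw_ver(20, 7, 0, 0)) ||
	    fw_ver >= pack_fw_ver(20, 8, 0, 0))
		printf("firmware supports VF request forwarding\n");
	return 0;
}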
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile index 0fffe356..b03f65dc 100644 --- a/drivers/net/bnxt/Makefile +++ b/drivers/net/bnxt/Makefile @@ -38,6 +38,8 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_pmd_bnxt.a +EXPORT_MAP := rte_pmd_bnxt_version.map + LIBABIVER := 1 CFLAGS += -O3 @@ -60,10 +62,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c # # Export include files # SYMLINK-y-include += +SYMLINK-$(CONFIG_RTE_LIBRTE_BNXT_PMD)-include := rte_pmd_bnxt.h include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h index 4418c7fd..405d94de 100644 --- a/drivers/net/bnxt/bnxt.h +++ b/drivers/net/bnxt/bnxt.h @@ -35,8 +35,10 @@ #define _BNXT_H_ #include <inttypes.h> +#include <stdbool.h> #include <sys/queue.h> +#include <rte_pci.h> #include <rte_ethdev.h> #include <rte_memory.h> #include <rte_lcore.h> @@ -44,8 +46,44 @@ #include "bnxt_cpr.h" -#define BNXT_MAX_MTU 9000 +#define BNXT_MAX_MTU 9500 #define VLAN_TAG_SIZE 4 +#define BNXT_MAX_LED 4 + +struct bnxt_led_info { + uint8_t led_id; + uint8_t led_type; + uint8_t led_group_id; + uint8_t unused; + uint16_t led_state_caps; +#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \ + rte_cpu_to_le_16(HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT)) + + uint16_t led_color_caps; +}; + +struct bnxt_led_cfg { + uint8_t led_id; + uint8_t led_state; + uint8_t led_color; + uint8_t unused; + uint16_t led_blink_on; + uint16_t led_blink_off; + uint8_t led_group_id; + uint8_t rsvd; +}; + +#define BNXT_LED_DFLT_ENA \ + (HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID | \ + HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE | \ + HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON | \ + HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF | \ + HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID) + +#define BNXT_LED_DFLT_ENA_SHIFT 6 + +#define BNXT_LED_DFLT_ENABLES(x) \ + rte_cpu_to_le_32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) enum bnxt_hw_context { HW_CONTEXT_NONE = 0, @@ -54,17 +92,32 @@ enum bnxt_hw_context { HW_CONTEXT_IS_LB = 3, }; -struct bnxt_vf_info { - uint16_t fw_fid; - uint8_t mac_addr[ETHER_ADDR_LEN]; - uint16_t max_rsscos_ctx; - uint16_t max_cp_rings; - uint16_t max_tx_rings; - uint16_t max_rx_rings; - uint16_t max_l2_ctx; - uint16_t max_vnics; - uint16_t vlan; - struct bnxt_pf_info *pf; +struct bnxt_vlan_table_entry { + uint16_t tpid; + uint16_t vid; +} __attribute__((packed)); + +struct bnxt_vlan_antispoof_table_entry { + uint16_t tpid; + uint16_t vid; + uint16_t mask; +} __attribute__((packed)); + +struct bnxt_child_vf_info { + void *req_buf; + struct bnxt_vlan_table_entry *vlan_table; + struct bnxt_vlan_antispoof_table_entry *vlan_as_table; + STAILQ_HEAD(, bnxt_filter_info) filter; + uint32_t func_cfg_flags; + uint32_t l2_rx_mask; + uint16_t fid; + uint16_t max_tx_rate; + uint16_t dflt_vlan; + uint16_t vlan_count; + uint8_t mac_spoof_en; + uint8_t vlan_spoof_en; + bool random_mac; + bool persist_stats; }; struct bnxt_pf_info { @@ -73,22 +126,20 @@ struct bnxt_pf_info { #define BNXT_FIRST_VF_FID 128 #define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp) #define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp)) - uint32_t fw_fid; uint8_t port_id; - uint8_t mac_addr[ETHER_ADDR_LEN]; - uint16_t max_rsscos_ctx; - uint16_t max_cp_rings; - uint16_t max_tx_rings; - uint16_t max_rx_rings; - 
uint16_t max_l2_ctx; - uint16_t max_vnics; uint16_t first_vf_id; uint16_t active_vfs; uint16_t max_vfs; + uint32_t func_cfg_flags; void *vf_req_buf; phys_addr_t vf_req_buf_dma_addr; uint32_t vf_req_fwd[8]; - struct bnxt_vf_info *vf; + uint16_t total_vnics; + struct bnxt_child_vf_info *vf_info; +#define BNXT_EVB_MODE_NONE 0 +#define BNXT_EVB_MODE_VEB 1 +#define BNXT_EVB_MODE_VEPA 2 + uint8_t evb_mode; }; /* Max wait time is 10 * 100ms = 1s */ @@ -120,6 +171,7 @@ struct bnxt_cos_queue_info { uint8_t profile; }; +#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) struct bnxt { void *bar0; @@ -129,6 +181,9 @@ struct bnxt { uint32_t flags; #define BNXT_FLAG_REGISTERED (1 << 0) #define BNXT_FLAG_VF (1 << 1) +#define BNXT_FLAG_PORT_STATS (1 << 2) +#define BNXT_FLAG_JUMBO (1 << 3) +#define BNXT_FLAG_SHORT_CMD (1 << 4) #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) #define BNXT_NPAR_ENABLED(bp) ((bp)->port_partition_type) @@ -137,10 +192,16 @@ struct bnxt { unsigned int rx_nr_rings; unsigned int rx_cp_nr_rings; struct bnxt_rx_queue **rx_queues; + const void *rx_mem_zone; + struct rx_port_stats *hw_rx_port_stats; + phys_addr_t hw_rx_port_stats_map; unsigned int tx_nr_rings; unsigned int tx_cp_nr_rings; struct bnxt_tx_queue **tx_queues; + const void *tx_mem_zone; + struct tx_port_stats *hw_tx_port_stats; + phys_addr_t hw_tx_port_stats_map; /* Default completion ring */ struct bnxt_cp_ring_info *def_cp_ring; @@ -167,6 +228,8 @@ struct bnxt { uint16_t hwrm_cmd_seq; void *hwrm_cmd_resp_addr; phys_addr_t hwrm_cmd_resp_dma_addr; + void *hwrm_short_cmd_req_addr; + phys_addr_t hwrm_short_cmd_req_dma_addr; rte_spinlock_t hwrm_lock; uint16_t max_req_len; uint16_t max_resp_len; @@ -174,12 +237,36 @@ struct bnxt { struct bnxt_link_info link_info; struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT]; + uint16_t fw_fid; + uint8_t dflt_mac_addr[ETHER_ADDR_LEN]; + uint16_t max_rsscos_ctx; + uint16_t max_cp_rings; + uint16_t max_tx_rings; + uint16_t max_rx_rings; + uint16_t max_l2_ctx; + uint16_t max_vnics; + uint16_t max_stat_ctx; + uint16_t vlan; struct bnxt_pf_info pf; - struct bnxt_vf_info vf; uint8_t port_partition_type; uint8_t dev_stopped; + uint8_t vxlan_port_cnt; + uint8_t geneve_port_cnt; + uint16_t vxlan_port; + uint16_t geneve_port; + uint16_t vxlan_fw_dst_port_id; + uint16_t geneve_fw_dst_port_id; + uint32_t fw_ver; + rte_atomic64_t rx_mbuf_alloc_fail; + + struct bnxt_led_info leds[BNXT_MAX_LED]; + uint8_t num_leds; }; int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete); +int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg); + +#define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG 0x6 +bool is_bnxt_supported(struct rte_eth_dev *dev); #endif diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c index 3aedcb8d..68979bc4 100644 --- a/drivers/net/bnxt/bnxt_cpr.c +++ b/drivers/net/bnxt/bnxt_cpr.c @@ -58,63 +58,123 @@ void bnxt_handle_async_event(struct bnxt *bp, bnxt_link_update_op(bp->eth_dev, 0); break; default: - RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n", event_id); + RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id); break; } } void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl) { + struct hwrm_exec_fwd_resp_input *fwreq; struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl; struct input *fwd_cmd; - uint16_t logical_vf_id, error_code; + uint16_t fw_vf_id; + uint16_t vf_id; + uint16_t req_len; + int rc; - /* Qualify the fwd request */ - if 
(fwd_cmpl->source_id < bp->pf.first_vf_id) { - RTE_LOG(ERR, PMD, - "FWD req's source_id 0x%x > first_vf_id 0x%x\n", - fwd_cmpl->source_id, bp->pf.first_vf_id); - error_code = HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED; - goto reject; - } else if (fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT > - 128 - sizeof(struct input)) { - RTE_LOG(ERR, PMD, - "FWD req's cmd len 0x%x > 108 bytes allowed\n", - fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT); - error_code = HWRM_ERR_CODE_INVALID_PARAMS; - goto reject; + if (bp->pf.active_vfs <= 0) { + RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n"); + return; } + /* Qualify the fwd request */ + fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id); + vf_id = fw_vf_id - bp->pf.first_vf_id; + + req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) & + HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >> + HWRM_FWD_REQ_CMPL_REQ_LEN_SFT; + if (req_len > sizeof(fwreq->encap_request)) + req_len = sizeof(fwreq->encap_request); + /* Locate VF's forwarded command */ - logical_vf_id = fwd_cmpl->source_id - bp->pf.first_vf_id; - fwd_cmd = (struct input *)((uint8_t *)bp->pf.vf_req_buf + - (logical_vf_id * 128)); - - /* Provision the request */ - switch (fwd_cmd->req_type) { - case HWRM_CFA_L2_FILTER_ALLOC: - case HWRM_CFA_L2_FILTER_FREE: - case HWRM_CFA_L2_FILTER_CFG: - case HWRM_CFA_L2_SET_RX_MASK: - break; - default: - error_code = HWRM_ERR_CODE_INVALID_PARAMS; + fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf; + + if (fw_vf_id < bp->pf.first_vf_id || + fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) { + RTE_LOG(ERR, PMD, + "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n", + fw_vf_id, bp->pf.first_vf_id, + (bp->pf.first_vf_id) + bp->pf.active_vfs - 1, + bp->pf.first_vf_id, bp->pf.active_vfs); goto reject; } - /* Forward */ - fwd_cmd->target_id = fwd_cmpl->source_id; - bnxt_hwrm_exec_fwd_resp(bp, fwd_cmd); - return; + if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) { + /* + * In older firmware versions, the MAC had to be all zeros for + * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all + * zeros if it's being configured and has been ok'd by caller. + */ + if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) { + struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd; + + if (vfc->enables & + HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) { + bnxt_hwrm_func_vf_mac(bp, vf_id, + (const uint8_t *)"\x00\x00\x00\x00\x00"); + } + } + if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) { + struct hwrm_cfa_l2_set_rx_mask_input *srm = + (void *)fwd_cmd; + + srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0); + srm->num_vlan_tags = rte_cpu_to_le_32(0); + srm->mask &= ~rte_cpu_to_le_32( + HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY | + HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN | + HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN); + } + /* Forward */ + rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len); + if (rc) { + RTE_LOG(ERR, PMD, + "Failed to send FWD req VF 0x%x, type 0x%x.\n", + fw_vf_id - bp->pf.first_vf_id, + rte_le_to_cpu_16(fwd_cmd->req_type)); + } + return; + } reject: - /* TODO: Encap the reject error resp into the hwrm_err_iput? 
*/ - /* Use the error_code for the reject cmd */ - RTE_LOG(ERR, PMD, - "Error 0x%x found in the forward request\n", error_code); + rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len); + if (rc) { + RTE_LOG(ERR, PMD, + "Failed to send REJECT req VF 0x%x, type 0x%x.\n", + fw_vf_id - bp->pf.first_vf_id, + rte_le_to_cpu_16(fwd_cmd->req_type)); + } + + return; } /* For the default completion ring only */ +int bnxt_alloc_def_cp_ring(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + int rc; + + rc = bnxt_hwrm_ring_alloc(bp, cp_ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, + 0, HWRM_NA_SIGNATURE, + HWRM_NA_SIGNATURE); + if (rc) + goto err_out; + cpr->cp_doorbell = bp->pdev->mem_resource[2].addr; + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id; + if (BNXT_PF(bp)) + rc = bnxt_hwrm_func_cfg_def_cp(bp); + else + rc = bnxt_hwrm_vf_func_cfg_def_cp(bp); + +err_out: + return rc; +} + void bnxt_free_def_cp_ring(struct bnxt *bp) { struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h index 83e53761..a6e87858 100644 --- a/drivers/net/bnxt/bnxt_cpr.h +++ b/drivers/net/bnxt/bnxt_cpr.h @@ -33,6 +33,7 @@ #ifndef _BNXT_CPR_H_ #define _BNXT_CPR_H_ +#include <stdbool.h> #include <rte_io.h> @@ -56,6 +57,19 @@ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ ((cpr)->cp_doorbell)) +#define B_CP_DB_ARM(cpr) rte_write32((DB_KEY_CP), ((cpr)->cp_doorbell)) +#define B_CP_DB_DISARM(cpr) (*(uint32_t *)((cpr)->cp_doorbell) = \ + DB_KEY_CP | DB_IRQ_DIS) + +#define B_CP_DB_IDX_ARM(cpr, cons) \ + (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \ + (cons))) + +#define B_CP_DB_IDX_DISARM(cpr, cons) do { \ + rte_smp_wmb(); \ + (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \ + (cons)); \ +} while (0) #define B_CP_DIS_DB(cpr, raw_cons) \ rte_write32((DB_CP_FLAGS | \ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ @@ -75,6 +89,8 @@ struct bnxt_cp_ring_info { uint32_t hw_stats_ctx_id; struct bnxt_ring *cp_ring_struct; + uint16_t cp_cons; + bool v; }; #define RX_CMP_L2_ERRORS \ @@ -82,6 +98,7 @@ struct bnxt_cp_ring_info { struct bnxt; +int bnxt_alloc_def_cp_ring(struct bnxt *bp); void bnxt_free_def_cp_ring(struct bnxt *bp); int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id); void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp); diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 81711e48..c9d11228 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -60,6 +60,7 @@ static const char bnxt_version[] = #define PCI_VENDOR_ID_BROADCOM 0x14E4 +#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614 #define BROADCOM_DEV_ID_57414_VF 0x16c1 #define BROADCOM_DEV_ID_57301 0x16c8 @@ -96,6 +97,8 @@ static const char bnxt_version[] = #define BROADCOM_DEV_ID_57416_MF 0x16ee static const struct rte_pci_id bnxt_pci_id_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, + BROADCOM_DEV_ID_STRATUS_NIC_VF) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, @@ -141,6 +144,8 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { ETH_RSS_NONFRAG_IPV6_TCP | \ ETH_RSS_NONFRAG_IPV6_UDP) +static void bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); + 
/***********************/ /* @@ -198,6 +203,14 @@ static int bnxt_init_chip(struct bnxt *bp) struct rte_eth_link new; int rc; + if (bp->eth_dev->data->mtu > ETHER_MTU) { + bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + bp->flags |= BNXT_FLAG_JUMBO; + } else { + bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + bp->flags &= ~BNXT_FLAG_JUMBO; + } + rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); if (rc) { RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc); @@ -228,28 +241,31 @@ static int bnxt_init_chip(struct bnxt *bp) rc = bnxt_hwrm_vnic_alloc(bp, vnic); if (rc) { - RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n", - rc); + RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n", + i, rc); goto err_out; } rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); if (rc) { RTE_LOG(ERR, PMD, - "HWRM vnic ctx alloc failure rc: %x\n", rc); + "HWRM vnic %d ctx alloc failure rc: %x\n", + i, rc); goto err_out; } rc = bnxt_hwrm_vnic_cfg(bp, vnic); if (rc) { - RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc); + RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n", + i, rc); goto err_out; } rc = bnxt_set_hwrm_vnic_filters(bp, vnic); if (rc) { - RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n", - rc); + RTE_LOG(ERR, PMD, + "HWRM vnic %d filter failure rc: %x\n", + i, rc); goto err_out; } if (vnic->rss_table && vnic->hash_type) { @@ -269,13 +285,20 @@ static int bnxt_init_chip(struct bnxt *bp) rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); if (rc) { RTE_LOG(ERR, PMD, - "HWRM vnic set RSS failure rc: %x\n", - rc); + "HWRM vnic %d set RSS failure rc: %x\n", + i, rc); goto err_out; } } + + bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + + if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); + else + bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); } - rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]); + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); if (rc) { RTE_LOG(ERR, PMD, "HWRM cfa l2 rx mask failure rc: %x\n", rc); @@ -338,25 +361,19 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; uint16_t max_vnics, i, j, vpool, vrxq; - dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); /* MAC Specifics */ dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR; dev_info->max_hash_mac_addrs = 0; /* PF/VF specifics */ - if (BNXT_PF(bp)) { - dev_info->max_rx_queues = bp->pf.max_rx_rings; - dev_info->max_tx_queues = bp->pf.max_tx_rings; - dev_info->max_vfs = bp->pf.active_vfs; - dev_info->reta_size = bp->pf.max_rsscos_ctx; - max_vnics = bp->pf.max_vnics; - } else { - dev_info->max_rx_queues = bp->vf.max_rx_rings; - dev_info->max_tx_queues = bp->vf.max_tx_rings; - dev_info->reta_size = bp->vf.max_rsscos_ctx; - max_vnics = bp->vf.max_vnics; - } + if (BNXT_PF(bp)) + dev_info->max_vfs = bp->pdev->max_vfs; + dev_info->max_rx_queues = bp->max_rx_rings; + dev_info->max_tx_queues = bp->max_tx_rings; + dev_info->reta_size = bp->max_rsscos_ctx; + max_vnics = bp->max_vnics; /* Fast path specifics */ dev_info->min_rx_bufsize = 1; @@ -366,7 +383,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO; /* *INDENT-OFF* */ 
dev_info->default_rxconf = (struct rte_eth_rxconf) { @@ -485,44 +507,29 @@ static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev) static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + int vlan_mask = 0; int rc; bp->dev_stopped = 0; - rc = bnxt_hwrm_func_reset(bp); - if (rc) { - RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc); - rc = -1; - goto error; - } - - rc = bnxt_setup_int(bp); - if (rc) - goto error; - - rc = bnxt_alloc_mem(bp); - if (rc) - goto error; - - rc = bnxt_request_int(bp); - if (rc) - goto error; rc = bnxt_init_nic(bp); if (rc) goto error; - bnxt_enable_int(bp); - bnxt_link_update_op(eth_dev, 0); + + if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter) + vlan_mask |= ETH_VLAN_FILTER_MASK; + if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) + vlan_mask |= ETH_VLAN_STRIP_MASK; + bnxt_vlan_offload_set_op(eth_dev, vlan_mask); + return 0; error: bnxt_shutdown_nic(bp); - bnxt_disable_int(bp); - bnxt_free_int(bp); bnxt_free_tx_mbufs(bp); bnxt_free_rx_mbufs(bp); - bnxt_free_mem(bp); return rc; } @@ -554,8 +561,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) eth_dev->data->dev_link.link_status = 0; } bnxt_set_hwrm_link_config(bp, false); - bnxt_disable_int(bp); - bnxt_free_int(bp); + bnxt_hwrm_port_clr_stats(bp); bnxt_shutdown_nic(bp); bp->dev_stopped = 1; } @@ -651,7 +657,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, STAILQ_INSERT_TAIL(&vnic->filter, filter, next); filter->mac_index = index; memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN); - return bnxt_hwrm_set_filter(bp, vnic, filter); + return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter); } int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) @@ -669,7 +675,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) new.link_speed = ETH_LINK_SPEED_100M; new.link_duplex = ETH_LINK_FULL_DUPLEX; RTE_LOG(ERR, PMD, - "Failed to retrieve link rc = 0x%x!", rc); + "Failed to retrieve link rc = 0x%x!\n", rc); goto out; } rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); @@ -700,7 +706,7 @@ static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) vnic = &bp->vnic_info[0]; vnic->flags |= BNXT_VNIC_INFO_PROMISC; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) @@ -714,7 +720,7 @@ static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) vnic = &bp->vnic_info[0]; vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) @@ -728,7 +734,7 @@ static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) vnic = &bp->vnic_info[0]; vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) @@ -742,7 +748,7 @@ static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) vnic = &bp->vnic_info[0]; vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, @@ -918,7 +924,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, } static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, - struct rte_eth_fc_conf 
*fc_conf __rte_unused) + struct rte_eth_fc_conf *fc_conf) { struct bnxt *bp = (struct bnxt *)dev->data->dev_private; struct rte_eth_link link_info; @@ -1003,6 +1009,514 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, return bnxt_set_hwrm_link_config(bp, true); } +/* Add UDP tunneling port */ +static int +bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint16_t tunnel_type = 0; + int rc = 0; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + if (bp->vxlan_port_cnt) { + RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n", + udp_tunnel->udp_port); + if (bp->vxlan_port != udp_tunnel->udp_port) { + RTE_LOG(ERR, PMD, "Only one port allowed\n"); + return -ENOSPC; + } + bp->vxlan_port_cnt++; + return 0; + } + tunnel_type = + HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; + bp->vxlan_port_cnt++; + break; + case RTE_TUNNEL_TYPE_GENEVE: + if (bp->geneve_port_cnt) { + RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n", + udp_tunnel->udp_port); + if (bp->geneve_port != udp_tunnel->udp_port) { + RTE_LOG(ERR, PMD, "Only one port allowed\n"); + return -ENOSPC; + } + bp->geneve_port_cnt++; + return 0; + } + tunnel_type = + HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; + bp->geneve_port_cnt++; + break; + default: + RTE_LOG(ERR, PMD, "Tunnel type is not supported\n"); + return -ENOTSUP; + } + rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, + tunnel_type); + return rc; +} + +static int +bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint16_t tunnel_type = 0; + uint16_t port = 0; + int rc = 0; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + if (!bp->vxlan_port_cnt) { + RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n"); + return -EINVAL; + } + if (bp->vxlan_port != udp_tunnel->udp_port) { + RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n", + udp_tunnel->udp_port, bp->vxlan_port); + return -EINVAL; + } + if (--bp->vxlan_port_cnt) + return 0; + + tunnel_type = + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; + port = bp->vxlan_fw_dst_port_id; + break; + case RTE_TUNNEL_TYPE_GENEVE: + if (!bp->geneve_port_cnt) { + RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n"); + return -EINVAL; + } + if (bp->geneve_port != udp_tunnel->udp_port) { + RTE_LOG(ERR, PMD, "Req Port: %d. 
Configured port: %d\n", + udp_tunnel->udp_port, bp->geneve_port); + return -EINVAL; + } + if (--bp->geneve_port_cnt) + return 0; + + tunnel_type = + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; + port = bp->geneve_fw_dst_port_id; + break; + default: + RTE_LOG(ERR, PMD, "Tunnel type is not supported\n"); + return -ENOTSUP; + } + + rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); + if (!rc) { + if (tunnel_type == + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) + bp->vxlan_port = 0; + if (tunnel_type == + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) + bp->geneve_port = 0; + } + return rc; +} + +static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) +{ + struct bnxt_filter_info *filter, *temp_filter, *new_filter; + struct bnxt_vnic_info *vnic; + unsigned int i; + int rc = 0; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN; + + /* Cycle through all VNICs */ + for (i = 0; i < bp->nr_vnics; i++) { + /* + * For each VNIC and each associated filter(s) + * if VLAN exists && VLAN matches vlan_id + * remove the MAC+VLAN filter + * add a new MAC only filter + * else + * VLAN filter doesn't exist, just skip and continue + */ + STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + + if (filter->enables & chk && + filter->l2_ovlan == vlan_id) { + /* Must delete the filter */ + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_hwrm_clear_filter(bp, filter); + STAILQ_INSERT_TAIL( + &bp->free_filter_list, + filter, next); + + /* + * Need to examine to see if the MAC + * filter already existed or not before + * allocating a new one + */ + + new_filter = bnxt_alloc_filter(bp); + if (!new_filter) { + RTE_LOG(ERR, PMD, + "MAC/VLAN filter alloc failed\n"); + rc = -ENOMEM; + goto exit; + } + STAILQ_INSERT_TAIL(&vnic->filter, + new_filter, next); + /* Inherit MAC from previous filter */ + new_filter->mac_index = + filter->mac_index; + memcpy(new_filter->l2_addr, + filter->l2_addr, ETHER_ADDR_LEN); + /* MAC only filter */ + rc = bnxt_hwrm_set_filter(bp, + vnic->fw_vnic_id, + new_filter); + if (rc) + goto exit; + RTE_LOG(INFO, PMD, + "Del Vlan filter for %d\n", + vlan_id); + } + filter = temp_filter; + } + } + } +exit: + return rc; +} + +static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) +{ + struct bnxt_filter_info *filter, *temp_filter, *new_filter; + struct bnxt_vnic_info *vnic; + unsigned int i; + int rc = 0; + uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN; + + /* Cycle through all VNICs */ + for (i = 0; i < bp->nr_vnics; i++) { + /* + * For each VNIC and each associated filter(s) + * if VLAN exists: + * if VLAN matches vlan_id + * VLAN filter already exists, just skip and continue + * else + * add a new MAC+VLAN filter + * else + * Remove the old MAC only filter + * Add a new MAC+VLAN filter + */ + STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + + if (filter->enables & chk) { + if (filter->l2_ovlan == vlan_id) + goto cont; + } else { + /* Must delete the MAC filter */ + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_hwrm_clear_filter(bp, filter); + filter->l2_ovlan = 0; + STAILQ_INSERT_TAIL( + &bp->free_filter_list, + filter, next); + } + new_filter = 
bnxt_alloc_filter(bp); + if (!new_filter) { + RTE_LOG(ERR, PMD, + "MAC/VLAN filter alloc failed\n"); + rc = -ENOMEM; + goto exit; + } + STAILQ_INSERT_TAIL(&vnic->filter, new_filter, + next); + /* Inherit MAC from the previous filter */ + new_filter->mac_index = filter->mac_index; + memcpy(new_filter->l2_addr, filter->l2_addr, + ETHER_ADDR_LEN); + /* MAC + VLAN ID filter */ + new_filter->l2_ovlan = vlan_id; + new_filter->l2_ovlan_mask = 0xF000; + new_filter->enables |= en; + rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, + new_filter); + if (rc) + goto exit; + RTE_LOG(INFO, PMD, + "Added Vlan filter for %d\n", vlan_id); +cont: + filter = temp_filter; + } + } + } +exit: + return rc; +} + +static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, + uint16_t vlan_id, int on) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + + /* These operations apply to ALL existing MAC/VLAN filters */ + if (on) + return bnxt_add_vlan_filter(bp, vlan_id); + else + return bnxt_del_vlan_filter(bp, vlan_id); +} + +static void +bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + unsigned int i; + + if (mask & ETH_VLAN_FILTER_MASK) { + if (!dev->data->dev_conf.rxmode.hw_vlan_filter) { + /* Remove any VLAN filters programmed */ + for (i = 0; i < 4095; i++) + bnxt_del_vlan_filter(bp, i); + } + RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n", + dev->data->dev_conf.rxmode.hw_vlan_filter); + } + + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + if (dev->data->dev_conf.rxmode.hw_vlan_strip) + vnic->vlan_strip = true; + else + vnic->vlan_strip = false; + bnxt_hwrm_vnic_cfg(bp, vnic); + } + RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n", + dev->data->dev_conf.rxmode.hw_vlan_strip); + } + + if (mask & ETH_VLAN_EXTEND_MASK) + RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n"); +} + +static void +bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + /* Default Filter is tied to VNIC 0 */ + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_filter_info *filter; + int rc; + + if (BNXT_VF(bp)) + return; + + memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr)); + memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN); + + STAILQ_FOREACH(filter, &vnic->filter, next) { + /* Default Filter is at Index 0 */ + if (filter->mac_index != 0) + continue; + rc = bnxt_hwrm_clear_filter(bp, filter); + if (rc) + break; + memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN); + memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN); + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter->enables |= + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; + rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter); + if (rc) + break; + filter->mac_index = 0; + RTE_LOG(DEBUG, PMD, "Set MAC addr\n"); + } +} + +static int +bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + char *mc_addr_list = (char *)mc_addr_set; + struct bnxt_vnic_info *vnic; + uint32_t off = 0, i = 0; + + vnic = &bp->vnic_info[0]; + + if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { + vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; + goto allmulti; + } + + /* TODO Check for Duplicate mcast addresses 
*/ + vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; + for (i = 0; i < nb_mc_addr; i++) { + memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN); + off += ETHER_ADDR_LEN; + } + + vnic->mc_addr_cnt = i; + +allmulti: + return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); +} + +static int +bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; + uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; + uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; + int ret; + + ret = snprintf(fw_version, fw_size, "%d.%d.%d", + fw_major, fw_minor, fw_updt); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (uint32_t)ret) + return ret; + else + return 0; +} + +static void +bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct bnxt_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = 0; + qinfo->conf.rx_deferred_start = 0; +} + +static void +bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct bnxt_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = 0; + qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct rte_eth_dev_info dev_info; + uint32_t max_dev_mtu; + uint32_t rc = 0; + uint32_t i; + + bnxt_dev_info_get_op(eth_dev, &dev_info); + max_dev_mtu = dev_info.max_rx_pktlen - + ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2; + + if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) { + RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n", + ETHER_MIN_MTU, max_dev_mtu); + return -EINVAL; + } + + + if (new_mtu > ETHER_MTU) { + bp->flags |= BNXT_FLAG_JUMBO; + eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + } else { + eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + bp->flags &= ~BNXT_FLAG_JUMBO; + } + + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = + new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; + + eth_dev->data->mtu = new_mtu; + RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu); + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) + break; + + rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + if (rc) + return rc; + } + + return rc; +} + +static int +bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + uint16_t vlan = bp->vlan; + int rc; + + if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) { + RTE_LOG(ERR, PMD, + "PVID cannot be modified for this function\n"); + return -ENOTSUP; + } + bp->vlan = on ? 
pvid : 0; + + rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); + if (rc) + bp->vlan = vlan; + return rc; +} + +static int +bnxt_dev_led_on_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + + return bnxt_hwrm_port_led_cfg(bp, true); +} + +static int +bnxt_dev_led_off_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + + return bnxt_hwrm_port_led_cfg(bp, false); +} + /* * Initialization */ @@ -1034,6 +1548,22 @@ static const struct eth_dev_ops bnxt_dev_ops = { .mac_addr_remove = bnxt_mac_addr_remove_op, .flow_ctrl_get = bnxt_flow_ctrl_get_op, .flow_ctrl_set = bnxt_flow_ctrl_set_op, + .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, + .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, + .vlan_filter_set = bnxt_vlan_filter_set_op, + .vlan_offload_set = bnxt_vlan_offload_set_op, + .vlan_pvid_set = bnxt_vlan_pvid_set_op, + .mtu_set = bnxt_mtu_set_op, + .mac_addr_set = bnxt_set_default_mac_addr_op, + .xstats_get = bnxt_dev_xstats_get_op, + .xstats_get_names = bnxt_dev_xstats_get_names_op, + .xstats_reset = bnxt_dev_xstats_reset_op, + .fw_version_get = bnxt_fw_version_get, + .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, + .rxq_info_get = bnxt_rxq_info_get_op, + .txq_info_get = bnxt_txq_info_get_op, + .dev_led_on = bnxt_dev_led_on_op, + .dev_led_off = bnxt_dev_led_off_op, }; static bool bnxt_vf_pciid(uint16_t id) @@ -1041,7 +1571,9 @@ static bool bnxt_vf_pciid(uint16_t id) if (id == BROADCOM_DEV_ID_57304_VF || id == BROADCOM_DEV_ID_57406_VF || id == BROADCOM_DEV_ID_5731X_VF || - id == BROADCOM_DEV_ID_5741X_VF) + id == BROADCOM_DEV_ID_5741X_VF || + id == BROADCOM_DEV_ID_57414_VF || + id == BROADCOM_DEV_ID_STRATUS_NIC_VF) return true; return false; } @@ -1049,7 +1581,7 @@ static bool bnxt_vf_pciid(uint16_t id) static int bnxt_init_board(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; - struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); int rc; /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ @@ -1082,22 +1614,35 @@ init_err_disable: static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); +#define ALLOW_FUNC(x) \ + { \ + typeof(x) arg = (x); \ + bp->pf.vf_req_fwd[((arg) >> 5)] &= \ + ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \ + } static int bnxt_dev_init(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz = NULL; static int version_printed; + uint32_t total_alloc_len; + phys_addr_t mz_phys_addr; struct bnxt *bp; int rc; if (version_printed++ == 0) - RTE_LOG(INFO, PMD, "%s", bnxt_version); + RTE_LOG(INFO, PMD, "%s\n", bnxt_version); rte_eth_copy_pci_info(eth_dev, pci_dev); eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; bp = eth_dev->data->dev_private; + rte_atomic64_init(&bp->rx_mbuf_alloc_fail); + bp->dev_stopped = 1; + if (bnxt_vf_pciid(pci_dev->id.device_id)) bp->flags |= BNXT_FLAG_VF; @@ -1111,6 +1656,80 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = &bnxt_recv_pkts; eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; + if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) { + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain, + pci_dev->addr.bus, pci_dev->addr.devid, + pci_dev->addr.function, "rx_port_stats"); + mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + total_alloc_len = RTE_CACHE_LINE_ROUNDUP( + sizeof(struct rx_port_stats) + 512); + if (!mz) { + mz = rte_memzone_reserve(mz_name, total_alloc_len, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + mz_phys_addr = mz->phys_addr; + if ((unsigned long)mz->addr == mz_phys_addr) { + RTE_LOG(WARNING, PMD, + "Memzone physical address same as virtual.\n"); + RTE_LOG(WARNING, PMD, + "Using rte_mem_virt2phy()\n"); + mz_phys_addr = rte_mem_virt2phy(mz->addr); + if (mz_phys_addr == 0) { + RTE_LOG(ERR, PMD, + "unable to map address to physical memory\n"); + return -ENOMEM; + } + } + + bp->rx_mem_zone = (const void *)mz; + bp->hw_rx_port_stats = mz->addr; + bp->hw_rx_port_stats_map = mz_phys_addr; + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain, + pci_dev->addr.bus, pci_dev->addr.devid, + pci_dev->addr.function, "tx_port_stats"); + mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + total_alloc_len = RTE_CACHE_LINE_ROUNDUP( + sizeof(struct tx_port_stats) + 512); + if (!mz) { + mz = rte_memzone_reserve(mz_name, total_alloc_len, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + mz_phys_addr = mz->phys_addr; + if ((unsigned long)mz->addr == mz_phys_addr) { + RTE_LOG(WARNING, PMD, + "Memzone physical address same as virtual.\n"); + RTE_LOG(WARNING, PMD, + "Using rte_mem_virt2phy()\n"); + mz_phys_addr = rte_mem_virt2phy(mz->addr); + if (mz_phys_addr == 0) { + RTE_LOG(ERR, PMD, + "unable to map address to physical memory\n"); + return -ENOMEM; + } + } + + bp->tx_mem_zone = (const void *)mz; + bp->hw_tx_port_stats = mz->addr; + bp->hw_tx_port_stats_map = mz_phys_addr; + + bp->flags |= BNXT_FLAG_PORT_STATS; + } + rc = bnxt_alloc_hwrm_resources(bp); if (rc) { RTE_LOG(ERR, PMD, @@ -1130,6 +1749,11 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) RTE_LOG(ERR, PMD, "hwrm query 
capability failure rc: %x\n", rc); goto error_free; } + if (bp->max_tx_rings == 0) { + RTE_LOG(ERR, PMD, "No TX rings available!\n"); + rc = -EBUSY; + goto error_free; + } eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0); if (eth_dev->data->mac_addrs == NULL) { @@ -1140,10 +1764,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) goto error_free; } /* Copy the permanent MAC from the qcap response address now. */ - if (BNXT_PF(bp)) - memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr)); - else - memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr)); + memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr)); memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN); bp->grp_info = rte_zmalloc("bnxt_grp_info", sizeof(*bp->grp_info) * bp->max_ring_grps, 0); @@ -1155,8 +1776,29 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) goto error_free; } - rc = bnxt_hwrm_func_driver_register(bp, 0, - bp->pf.vf_req_fwd); + /* Forward all requests if firmware is new enough */ + if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) && + (bp->fw_ver < ((20 << 24) | (7 << 16)))) || + ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) { + memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd)); + } else { + RTE_LOG(WARNING, PMD, + "Firmware too old for VF mailbox functionality\n"); + memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd)); + } + + /* + * The following are used for driver cleanup. If we disallow these, + * VF drivers can't clean up cleanly. + */ + ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR); + ALLOW_FUNC(HWRM_VNIC_FREE); + ALLOW_FUNC(HWRM_RING_FREE); + ALLOW_FUNC(HWRM_RING_GRP_FREE); + ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE); + ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE); + ALLOW_FUNC(HWRM_STAT_CTX_FREE); + rc = bnxt_hwrm_func_driver_register(bp); if (rc) { RTE_LOG(ERR, PMD, "Failed to register driver"); @@ -1169,10 +1811,61 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) pci_dev->mem_resource[0].phys_addr, pci_dev->mem_resource[0].addr); - bp->dev_stopped = 0; + rc = bnxt_hwrm_func_reset(bp); + if (rc) { + RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc); + rc = -1; + goto error_free; + } + + if (BNXT_PF(bp)) { + //if (bp->pf.active_vfs) { + // TODO: Deallocate VF resources? 
+ //} + if (bp->pdev->max_vfs) { + rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); + if (rc) { + RTE_LOG(ERR, PMD, "Failed to allocate VFs\n"); + goto error_free; + } + } else { + rc = bnxt_hwrm_allocate_pf_only(bp); + if (rc) { + RTE_LOG(ERR, PMD, + "Failed to allocate PF resources\n"); + goto error_free; + } + } + } + + bnxt_hwrm_port_led_qcaps(bp); + + rc = bnxt_setup_int(bp); + if (rc) + goto error_free; + + rc = bnxt_alloc_mem(bp); + if (rc) + goto error_free_int; + + rc = bnxt_request_int(bp); + if (rc) + goto error_free_int; + + rc = bnxt_alloc_def_cp_ring(bp); + if (rc) + goto error_free_int; + + bnxt_enable_int(bp); return 0; +error_free_int: + bnxt_disable_int(bp); + bnxt_free_def_cp_ring(bp); + bnxt_hwrm_func_buf_unrgtr(bp); + bnxt_free_int(bp); + bnxt_free_mem(bp); error_free: bnxt_dev_uninit(eth_dev); error: @@ -1184,6 +1877,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; int rc; + bnxt_disable_int(bp); + bnxt_free_int(bp); + bnxt_free_mem(bp); if (eth_dev->data->mac_addrs != NULL) { rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; @@ -1194,8 +1890,12 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { } rc = bnxt_hwrm_func_driver_unregister(bp, 0); bnxt_free_hwrm_resources(bp); + rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); + rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); if (bp->dev_stopped == 0) bnxt_dev_close_op(eth_dev); + if (bp->pf.vf_info) + rte_free(bp->pf.vf_info); eth_dev->dev_ops = NULL; eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; @@ -1223,6 +1923,20 @@ static struct rte_pci_driver bnxt_rte_pmd = { .remove = bnxt_pci_remove, }; +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) +{ + if (strcmp(dev->device->driver->name, drv->driver.name)) + return false; + + return true; +} + +bool is_bnxt_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &bnxt_rte_pmd); +} + RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c index df1042cf..e9aac271 100644 --- a/drivers/net/bnxt/bnxt_filter.c +++ b/drivers/net/bnxt/bnxt_filter.c @@ -68,20 +68,28 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp) return filter; } -void bnxt_init_filters(struct bnxt *bp) +struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf) { struct bnxt_filter_info *filter; - int i, max_filters; - if (BNXT_PF(bp)) { - struct bnxt_pf_info *pf = &bp->pf; + filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0); + if (!filter) { + RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n", + vf); + return NULL; + } - max_filters = pf->max_l2_ctx; - } else { - struct bnxt_vf_info *vf = &bp->vf; + filter->fw_l2_filter_id = UINT64_MAX; + STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next); + return filter; +} - max_filters = vf->max_l2_ctx; - } +void bnxt_init_filters(struct bnxt *bp) +{ + struct bnxt_filter_info *filter; + int i, max_filters; + + max_filters = bp->max_l2_ctx; STAILQ_INIT(&bp->free_filter_list); for (i = 0; i < max_filters; i++) { filter = &bp->filter_info[i]; @@ -110,6 +118,12 @@ void bnxt_free_all_filters(struct bnxt *bp) STAILQ_INIT(&vnic->filter); } } + + for (i = 0; i < bp->pf.max_vfs; i++) { + STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) { + 
bnxt_hwrm_clear_filter(bp, filter); + } + } } void bnxt_free_filter_mem(struct bnxt *bp) @@ -122,15 +136,7 @@ void bnxt_free_filter_mem(struct bnxt *bp) return; /* Ensure that all filters are freed */ - if (BNXT_PF(bp)) { - struct bnxt_pf_info *pf = &bp->pf; - - max_filters = pf->max_l2_ctx; - } else { - struct bnxt_vf_info *vf = &bp->vf; - - max_filters = vf->max_l2_ctx; - } + max_filters = bp->max_l2_ctx; for (i = 0; i < max_filters; i++) { filter = &bp->filter_info[i]; if (filter->fw_l2_filter_id != ((uint64_t)-1)) { @@ -142,7 +148,7 @@ void bnxt_free_filter_mem(struct bnxt *bp) "HWRM filter cannot be freed rc = %d\n", rc); } - filter->fw_l2_filter_id = -1; + filter->fw_l2_filter_id = UINT64_MAX; } STAILQ_INIT(&bp->free_filter_list); @@ -155,15 +161,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp) struct bnxt_filter_info *filter_mem; uint16_t max_filters; - if (BNXT_PF(bp)) { - struct bnxt_pf_info *pf = &bp->pf; - - max_filters = pf->max_l2_ctx; - } else { - struct bnxt_vf_info *vf = &bp->vf; - - max_filters = vf->max_l2_ctx; - } + max_filters = bp->max_l2_ctx; /* Allocate memory for VNIC pool and filter pool */ filter_mem = rte_zmalloc("bnxt_filter_info", max_filters * sizeof(struct bnxt_filter_info), diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h index 06fe134a..613b2eea 100644 --- a/drivers/net/bnxt/bnxt_filter.h +++ b/drivers/net/bnxt/bnxt_filter.h @@ -63,9 +63,12 @@ struct bnxt_filter_info { uint32_t vni; uint8_t pri_hint; uint64_t l2_filter_id_hint; + uint32_t src_id; + uint8_t src_type; }; struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp); +struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf); void bnxt_init_filters(struct bnxt *bp); void bnxt_free_all_filters(struct bnxt *bp); void bnxt_free_filter_mem(struct bnxt *bp); diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index d8987234..e710e636 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -31,6 +31,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ +#include <unistd.h> + #include <rte_byteorder.h> #include <rte_common.h> #include <rte_cycles.h> @@ -54,6 +56,38 @@ #define HWRM_CMD_TIMEOUT 2000 +struct bnxt_plcmodes_cfg { + uint32_t flags; + uint16_t jumbo_thresh; + uint16_t hds_offset; + uint16_t hds_threshold; +}; + +static int page_getenum(size_t size) +{ + if (size <= 1 << 4) + return 4; + if (size <= 1 << 12) + return 12; + if (size <= 1 << 13) + return 13; + if (size <= 1 << 16) + return 16; + if (size <= 1 << 21) + return 21; + if (size <= 1 << 22) + return 22; + if (size <= 1 << 30) + return 30; + RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size); + return sizeof(void *) * 8 - 1; +} + +static int page_roundup(size_t size) +{ + return 1 << page_getenum(size); +} + /* * HWRM Functions (sent to HWRM) * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message() @@ -70,6 +104,30 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg, uint32_t *data = msg; uint8_t *bar; uint8_t *valid; + uint16_t max_req_len = bp->max_req_len; + struct hwrm_short_input short_input = { 0 }; + + if (bp->flags & BNXT_FLAG_SHORT_CMD) { + void *short_cmd_req = bp->hwrm_short_cmd_req_addr; + + memset(short_cmd_req, 0, bp->max_req_len); + memcpy(short_cmd_req, req, msg_len); + + short_input.req_type = rte_cpu_to_le_16(req->req_type); + short_input.signature = rte_cpu_to_le_16( + HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD); + short_input.size = rte_cpu_to_le_16(msg_len); + short_input.req_addr = + rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr); + + data = (uint32_t *)&short_input; + msg_len = sizeof(short_input); + + /* Sync memory write before updating doorbell */ + rte_wmb(); + + max_req_len = BNXT_HWRM_SHORT_REQ_LEN; + } /* Write request msg to hwrm channel */ for (i = 0; i < msg_len; i += 4) { @@ -79,7 +137,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg, } /* Zero the rest of the request space */ - for (; i < bp->max_req_len; i += 4) { + for (; i < max_req_len; i += 4) { bar = (uint8_t *)bp->bar0 + i; rte_write32(0, bar); } @@ -103,7 +161,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg, } if (i >= HWRM_CMD_TIMEOUT) { - RTE_LOG(ERR, PMD, "Error sending msg %x\n", + RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n", req->req_type); goto err_ret; } @@ -140,7 +198,22 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len) } \ if (resp->error_code) { \ rc = rte_le_to_cpu_16(resp->error_code); \ - RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \ + if (resp->resp_len >= 16) { \ + struct hwrm_err_output *tmp_hwrm_err_op = \ + (void *)resp; \ + RTE_LOG(ERR, PMD, \ + "%s error %d:%d:%08x:%04x\n", \ + __func__, \ + rc, tmp_hwrm_err_op->cmd_err, \ + rte_le_to_cpu_32(\ + tmp_hwrm_err_op->opaque_0), \ + rte_le_to_cpu_16(\ + tmp_hwrm_err_op->opaque_1)); \ + } \ + else { \ + RTE_LOG(ERR, PMD, \ + "%s error %d\n", __func__, rc); \ + } \ return rc; \ } \ } @@ -162,7 +235,10 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) return rc; } -int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) +int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + uint16_t vlan_count, + struct bnxt_vlan_table_entry *vlan_table) { int rc = 0; struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 }; @@ -175,10 +251,28 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) /* FIXME add multicast flag, when multicast adding options is supported * by ethtool. 
*/ + if (vnic->flags & BNXT_VNIC_INFO_BCAST) + mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST; + if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED) + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN; if (vnic->flags & BNXT_VNIC_INFO_PROMISC) - mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS; + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS; if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) - mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; + if (vnic->flags & BNXT_VNIC_INFO_MCAST) + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST; + if (vnic->mc_addr_cnt) { + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST; + req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt); + req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr); + } + if (vlan_table) { + if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN)) + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY; + req.vlan_tag_tbl_addr = rte_cpu_to_le_16( + rte_mem_virt2phy(vlan_table)); + req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count); + } req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST | mask); @@ -189,6 +283,44 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) return rc; } +int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, + uint16_t vlan_count, + struct bnxt_vlan_antispoof_table_entry *vlan_table) +{ + int rc = 0; + struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 }; + struct hwrm_cfa_vlan_antispoof_cfg_output *resp = + bp->hwrm_cmd_resp_addr; + + /* + * Older HWRM versions did not support this command, and the set_rx_mask + * list was used for anti-spoof. In 1.8.0, the TX path configuration was + * removed from set_rx_mask call, and this command was added. 
+ * + * This command is also present from 1.7.8.11 and higher, + * as well as 1.7.8.0 + */ + if (bp->fw_ver < ((1 << 24) | (8 << 16))) { + if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) { + if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) | + (11))) + return 0; + } + } + HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp); + req.fid = rte_cpu_to_le_16(fid); + + req.vlan_tag_mask_tbl_addr = + rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table)); + req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + int bnxt_hwrm_clear_filter(struct bnxt *bp, struct bnxt_filter_info *filter) { @@ -196,6 +328,9 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp, struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 }; struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr; + if (filter->fw_l2_filter_id == UINT64_MAX) + return 0; + HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp); req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); @@ -210,7 +345,7 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp, } int bnxt_hwrm_set_filter(struct bnxt *bp, - struct bnxt_vnic_info *vnic, + uint16_t dst_id, struct bnxt_filter_info *filter) { int rc = 0; @@ -218,13 +353,16 @@ int bnxt_hwrm_set_filter(struct bnxt *bp, struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; uint32_t enables = 0; + if (filter->fw_l2_filter_id != UINT64_MAX) + bnxt_hwrm_clear_filter(bp, filter); + HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp); req.flags = rte_cpu_to_le_32(filter->flags); enables = filter->enables | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID; - req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.dst_id = rte_cpu_to_le_16(dst_id); if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR) @@ -240,6 +378,10 @@ int bnxt_hwrm_set_filter(struct bnxt *bp, if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK) req.l2_ovlan_mask = filter->l2_ovlan_mask; + if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID) + req.src_id = rte_cpu_to_le_32(filter->src_id); + if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE) + req.src_type = filter->src_type; req.enables = rte_cpu_to_le_32(enables); @@ -252,29 +394,13 @@ int bnxt_hwrm_set_filter(struct bnxt *bp, return rc; } -int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd) -{ - int rc; - struct hwrm_exec_fwd_resp_input req = {.req_type = 0 }; - struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; - - HWRM_PREP(req, EXEC_FWD_RESP, -1, resp); - - memcpy(req.encap_request, fwd_cmd, - sizeof(req.encap_request)); - - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - - HWRM_CHECK_RESULT; - - return rc; -} - int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; struct hwrm_func_qcaps_input req = {.req_type = 0 }; struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t new_max_vfs; + int i; HWRM_PREP(req, FUNC_QCAPS, -1, resp); @@ -286,31 +412,63 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); if (BNXT_PF(bp)) { - struct bnxt_pf_info *pf = &bp->pf; - - pf->fw_fid = rte_le_to_cpu_32(resp->fid); - pf->port_id = resp->port_id; - memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN); - pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); - pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); - pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); - pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); - 
pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
-		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
-		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
-	} else {
-		struct bnxt_vf_info *vf = &bp->vf;
+		bp->pf.port_id = resp->port_id;
+		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+		new_max_vfs = bp->pdev->max_vfs;
+		if (new_max_vfs != bp->pf.max_vfs) {
+			if (bp->pf.vf_info)
+				rte_free(bp->pf.vf_info);
+			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
+				sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
+			bp->pf.max_vfs = new_max_vfs;
+			for (i = 0; i < new_max_vfs; i++) {
+				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
+				bp->pf.vf_info[i].vlan_table =
+					rte_zmalloc("VF VLAN table",
+						    getpagesize(),
+						    getpagesize());
+				if (bp->pf.vf_info[i].vlan_table == NULL)
+					RTE_LOG(ERR, PMD,
+					"Failed to alloc VLAN table for VF %d\n",
+					i);
+				else
+					rte_mem_lock_page(
+						bp->pf.vf_info[i].vlan_table);
+				bp->pf.vf_info[i].vlan_as_table =
+					rte_zmalloc("VF VLAN AS table",
+						    getpagesize(),
+						    getpagesize());
+				if (bp->pf.vf_info[i].vlan_as_table == NULL)
+					RTE_LOG(ERR, PMD,
+					"Failed to alloc VLAN antispoof table for VF %d\n",
+					i);
+				else
+					rte_mem_lock_page(
+					       bp->pf.vf_info[i].vlan_as_table);
+				STAILQ_INIT(&bp->pf.vf_info[i].filter);
+			}
+		}
+	}
 
-		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
-		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
-		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
-		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
-		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
-		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
-		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
+	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
+	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+	/* TODO: For now, do not support VMDq/RFS on VFs.
*/ + if (BNXT_PF(bp)) { + if (bp->pf.max_vfs) + bp->max_vnics = 1; + else + bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); + } else { + bp->max_vnics = 1; } + bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); + if (BNXT_PF(bp)) + bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics); return rc; } @@ -332,8 +490,7 @@ int bnxt_hwrm_func_reset(struct bnxt *bp) return rc; } -int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags, - uint32_t *vf_req_fwd) +int bnxt_hwrm_func_driver_register(struct bnxt *bp) { int rc; struct hwrm_func_drv_rgtr_input req = {.req_type = 0 }; @@ -343,16 +500,22 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags, return 0; HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp); - req.flags = flags; - req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER | - HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD; + req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER | + HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD); req.ver_maj = RTE_VER_YEAR; req.ver_min = RTE_VER_MONTH; req.ver_upd = RTE_VER_MINOR; - memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd)); + if (BNXT_PF(bp)) { + req.enables |= rte_cpu_to_le_32( + HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD); + memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd, + RTE_MIN(sizeof(req.vf_req_fwd), + sizeof(bp->pf.vf_req_fwd))); + } req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */ + memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd)); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -372,7 +535,9 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) uint32_t fw_version; uint16_t max_resp_len; char type[RTE_MEMZONE_NAMESIZE]; + uint32_t dev_caps_cfg; + bp->max_req_len = HWRM_MAX_REQ_LEN; HWRM_PREP(req, VER_GET, -1, resp); req.hwrm_intf_maj = HWRM_VERSION_MAJOR; @@ -391,6 +556,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd, resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld); + bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) | + (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd; RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n", HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE); @@ -427,8 +594,10 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) RTE_LOG(ERR, PMD, "Unsupported request length\n"); rc = -EINVAL; } - bp->max_req_len = resp->max_req_win_len; + bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len); max_resp_len = resp->max_resp_len; + dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg); + if (bp->max_resp_len != max_resp_len) { sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", bp->pdev->addr.domain, bp->pdev->addr.bus, @@ -441,11 +610,46 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) rc = -ENOMEM; goto error; } + rte_mem_lock_page(bp->hwrm_cmd_resp_addr); bp->hwrm_cmd_resp_dma_addr = - rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr); + rte_mem_virt2phy(bp->hwrm_cmd_resp_addr); + if (bp->hwrm_cmd_resp_dma_addr == 0) { + RTE_LOG(ERR, PMD, + "Unable to map response buffer to physical memory.\n"); + rc = -ENOMEM; + goto error; + } bp->max_resp_len = max_resp_len; } + if ((dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && + (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) { + RTE_LOG(DEBUG, PMD, "Short command supported\n"); + + rte_free(bp->hwrm_short_cmd_req_addr); + + bp->hwrm_short_cmd_req_addr = rte_malloc(type, + bp->max_req_len, 0); + if (bp->hwrm_short_cmd_req_addr == NULL) { + rc = -ENOMEM; + goto error; + } + 
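For reference, req.vf_req_fwd (copied from bp->pf.vf_req_fwd above) is a request-type-indexed bitmap: a set bit asks the firmware to hand a VF's HWRM command of that type to the PF driver for vetting rather than executing it directly. A sketch of the bit arithmetic, assuming the usual 32-bit-word layout; the helper names are illustrative:

#include <stdint.h>

static void vf_fwd_set(uint32_t *vf_req_fwd, uint16_t req_type)
{
	/* One bit per HWRM request type: word = type / 32, bit = type % 32 */
	vf_req_fwd[req_type >> 5] |= 1U << (req_type & 0x1f);
}

static void vf_fwd_clear(uint32_t *vf_req_fwd, uint16_t req_type)
{
	vf_req_fwd[req_type >> 5] &= ~(1U << (req_type & 0x1f));
}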
rte_mem_lock_page(bp->hwrm_short_cmd_req_addr); + bp->hwrm_short_cmd_req_dma_addr = + rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr); + if (bp->hwrm_short_cmd_req_dma_addr == 0) { + rte_free(bp->hwrm_short_cmd_req_addr); + RTE_LOG(ERR, PMD, + "Unable to map buffer to physical memory.\n"); + rc = -ENOMEM; + goto error; + } + + bp->flags |= BNXT_FLAG_SHORT_CMD; + } + error: rte_spinlock_unlock(&bp->hwrm_lock); return rc; @@ -478,6 +682,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) struct hwrm_port_phy_cfg_input req = {0}; struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint32_t enables = 0; + uint32_t link_speed_mask = + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; HWRM_PREP(req, PORT_PHY_CFG, -1, resp); @@ -489,14 +695,20 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) * any auto mode, even "none". */ if (!conf->link_speed) { - req.auto_mode |= conf->auto_mode; - enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; - req.auto_link_speed_mask = conf->auto_link_speed_mask; - enables |= - HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; - req.auto_link_speed = bp->link_info.auto_link_speed; - enables |= + req.auto_mode = conf->auto_mode; + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; + if (conf->auto_mode == + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) { + req.auto_link_speed_mask = + conf->auto_link_speed_mask; + enables |= link_speed_mask; + } + if (bp->link_info.auto_link_speed) { + req.auto_link_speed = + bp->link_info.auto_link_speed; + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED; + } } req.auto_duplex = conf->duplex; enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX; @@ -511,7 +723,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) req.enables = rte_cpu_to_le_32(enables); } else { req.flags = - rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN); + rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN); RTE_LOG(INFO, PMD, "Force Link Down\n"); } @@ -536,13 +748,10 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, HWRM_CHECK_RESULT; link_info->phy_link_status = resp->link; - if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) { - link_info->link_up = 1; - link_info->link_speed = rte_le_to_cpu_16(resp->link_speed); - } else { - link_info->link_up = 0; - link_info->link_speed = 0; - } + link_info->link_up = + (link_info->phy_link_status == + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 
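The short-command buffer above follows the same DMA-mapping pattern used throughout this file: allocate, lock the page so it stays resident, resolve the physical address, and treat a zero address as failure. A condensed sketch of that pattern under those assumptions:

#include <stddef.h>
#include <rte_malloc.h>
#include <rte_memory.h>

static void *hwrm_dma_alloc(const char *name, size_t len, phys_addr_t *pa)
{
	void *va = rte_malloc(name, len, 0);

	if (va == NULL)
		return NULL;
	/* Touch and pin the page so the physical mapping stays valid */
	rte_mem_lock_page(va);
	*pa = rte_mem_virt2phy(va);
	if (*pa == 0) {
		rte_free(va);
		return NULL;
	}
	return va;
}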
1 : 0; + link_info->link_speed = rte_le_to_cpu_16(resp->link_speed); link_info->duplex = resp->duplex; link_info->pause = resp->pause; link_info->auto_pause = resp->auto_pause; @@ -590,20 +799,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) int bnxt_hwrm_ring_alloc(struct bnxt *bp, struct bnxt_ring *ring, uint32_t ring_type, uint32_t map_index, - uint32_t stats_ctx_id) + uint32_t stats_ctx_id, uint32_t cmpl_ring_id) { int rc = 0; + uint32_t enables = 0; struct hwrm_ring_alloc_input req = {.req_type = 0 }; struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; HWRM_PREP(req, RING_ALLOC, -1, resp); - req.enables = rte_cpu_to_le_32(0); - req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma); req.fbo = rte_cpu_to_le_32(0); /* Association of ring index with doorbell index */ req.logical_id = rte_cpu_to_le_16(map_index); + req.length = rte_cpu_to_le_32(ring->ring_size); switch (ring_type) { case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: @@ -611,27 +820,26 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, /* FALLTHROUGH */ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: req.ring_type = ring_type; - req.cmpl_ring_id = - rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id); - req.length = rte_cpu_to_le_32(ring->ring_size); + req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id); req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id); - req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) | - HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID); + if (stats_ctx_id != INVALID_STATS_CTX_ID) + enables |= + HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID; break; - case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL: + case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL: req.ring_type = ring_type; /* * TODO: Some HWRM versions crash with * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL */ req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX; - req.length = rte_cpu_to_le_32(ring->ring_size); break; default: RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n", ring_type); return -1; } + req.enables = rte_cpu_to_le_32(enables); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -639,7 +847,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, if (rc == 0 && resp->error_code) rc = rte_le_to_cpu_16(resp->error_code); switch (ring_type) { - case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL: + case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: RTE_LOG(ERR, PMD, "hwrm_ring_alloc cp failed. rc:%d\n", rc); return rc; @@ -680,7 +888,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, rc = rte_le_to_cpu_16(resp->error_code); switch (ring_type) { - case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL: + case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. 
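With the reworked signature, callers now pass the completion ring and statistics context explicitly instead of having bnxt_hwrm_ring_alloc() fish them out of grp_info. A hedged sketch of the intended call order; alloc_queue_rings() and the ring/context values are illustrative:

static int alloc_queue_rings(struct bnxt *bp, struct bnxt_ring *cp_ring,
			     struct bnxt_ring *rx_ring, unsigned int idx,
			     uint32_t stats_ctx_id)
{
	int rc;

	/* Completion ring first: no stats context, no parent ring */
	rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
				  HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				  idx, INVALID_STATS_CTX_ID,
				  INVALID_HW_RING_ID);
	if (rc)
		return rc;

	/* The Rx ring then points back at the completion ring's FW id */
	return bnxt_hwrm_ring_alloc(bp, rx_ring,
				    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
				    idx, stats_ctx_id,
				    cp_ring->fw_ring_id);
}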
rc:%d\n", rc); return rc; @@ -747,13 +955,12 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 }; struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp); - if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE) return rc; + HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp); + req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); - req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -762,8 +969,8 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) return rc; } -int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, unsigned int idx) +int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + unsigned int idx __rte_unused) { int rc; struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 }; @@ -771,9 +978,8 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp); - req.update_period_ms = rte_cpu_to_le_32(1000); + req.update_period_ms = rte_cpu_to_le_32(0); - req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map); @@ -782,13 +988,12 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, HWRM_CHECK_RESULT; cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id); - bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id; return rc; } -int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, unsigned int idx) +int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + unsigned int idx __rte_unused) { int rc; struct hwrm_stat_ctx_free_input req = {.req_type = 0 }; @@ -797,15 +1002,11 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, HWRM_PREP(req, STAT_CTX_FREE, -1, resp); req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); - req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT; - cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE; - bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id; - return rc; } @@ -816,26 +1017,80 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; /* map ring groups to this vnic */ - for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) { - if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) { - RTE_LOG(ERR, PMD, - "Not enough ring groups avail:%x req:%x\n", j, - (vnic->end_grp_id - vnic->start_grp_id) + 1); - break; - } + RTE_LOG(DEBUG, PMD, "Alloc VNIC. 
Start %x, End %x\n", + vnic->start_grp_id, vnic->end_grp_id); + for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id; - } - - vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE; - vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE; - + vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id; + vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + + ETHER_CRC_LEN + VLAN_TAG_SIZE; HWRM_PREP(req, VNIC_ALLOC, -1, resp); + if (vnic->func_default) + req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT; vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id); + RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; +} + +static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + struct bnxt_plcmodes_cfg *pmode) +{ + int rc = 0; + struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 }; + struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp); + + req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + pmode->flags = rte_le_to_cpu_32(resp->flags); + /* dflt_vnic bit doesn't exist in the _cfg command */ + pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC); + pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh); + pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset); + pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold); + + return rc; +} + +static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + struct bnxt_plcmodes_cfg *pmode) +{ + int rc = 0; + struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp); + + req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.flags = rte_cpu_to_le_32(pmode->flags); + req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh); + req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset); + req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold); + req.enables = rte_cpu_to_le_32( + HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID | + HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID | + HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID + ); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + return rc; } @@ -844,32 +1099,105 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; struct hwrm_vnic_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + struct bnxt_plcmodes_cfg pmodes; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; + } + + rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes); + if (rc) + return rc; HWRM_PREP(req, VNIC_CFG, -1, resp); /* Only RSS support for now TBD: COS & LB */ req.enables = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP | - HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE | HWRM_VNIC_CFG_INPUT_ENABLES_MRU); + if (vnic->lb_rule != 0xffff) + ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE; + if (vnic->cos_rule != 0xffff) + ctx_enable_flag = 
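Note the bracketing in bnxt_hwrm_vnic_cfg() above: placement modes are read back with VNIC_PLCMODES_QCFG before VNIC_CFG and re-applied afterwards, apparently so the jumbo/HDS settings survive reconfiguration (the dflt_vnic flag is masked out because the _cfg command has no such bit). The idiom, reduced to a sketch:

static int vnic_cfg_preserving_plcmodes(struct bnxt *bp,
					struct bnxt_vnic_info *vnic)
{
	struct bnxt_plcmodes_cfg pmodes;
	int rc;

	/* Save the current placement modes */
	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	/* ... issue HWRM_VNIC_CFG for the VNIC here ... */

	/* Re-apply the saved placement modes */
	return bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
}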
HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE; + if (vnic->rss_rule != 0xffff) + ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + req.enables |= rte_cpu_to_le_32(ctx_enable_flag); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); - req.dflt_ring_grp = - rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id); - req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx); - req.cos_rule = rte_cpu_to_le_16(0xffff); - req.lb_rule = rte_cpu_to_le_16(0xffff); - req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE); + req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp); + req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule); + req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule); + req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule); + req.mru = rte_cpu_to_le_16(vnic->mru); if (vnic->func_default) - req.flags = 1; + req.flags |= + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT); if (vnic->vlan_strip) req.flags |= rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE); + if (vnic->bd_stall) + req.flags |= + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE); + if (vnic->roce_dual) + req.flags |= rte_cpu_to_le_32( + HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE); + if (vnic->roce_only) + req.flags |= rte_cpu_to_le_32( + HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE); + if (vnic->rss_dflt_cr) + req.flags |= rte_cpu_to_le_32( + HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes); + + return rc; +} + +int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int16_t fw_vf_id) +{ + int rc = 0; + struct hwrm_vnic_qcfg_input req = {.req_type = 0 }; + struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id); + return rc; + } + HWRM_PREP(req, VNIC_QCFG, -1, resp); + + req.enables = + rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.vf_id = rte_cpu_to_le_16(fw_vf_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT; + vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp); + vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule); + vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule); + vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule); + vnic->mru = rte_le_to_cpu_16(resp->mru); + vnic->func_default = rte_le_to_cpu_32( + resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT; + vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE; + vnic->bd_stall = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE; + vnic->roce_dual = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE; + vnic->roce_only = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE; + vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE; + return rc; } @@ -886,7 +1214,8 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) HWRM_CHECK_RESULT; - vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id); + vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id); + RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule); return rc; } @@ -898,15 +1227,19 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct 
hwrm_vnic_rss_cos_lb_ctx_free_output *resp = bp->hwrm_cmd_resp_addr; + if (vnic->rss_rule == 0xffff) { + RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule); + return rc; + } HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp); - req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx); + req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT; - vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + vnic->rss_rule = INVALID_HW_RING_ID; return rc; } @@ -917,8 +1250,10 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_vnic_free_input req = {.req_type = 0 }; struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr; - if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id); return rc; + } HWRM_PREP(req, VNIC_FREE, -1, resp); @@ -947,7 +1282,168 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, rte_cpu_to_le_64(vnic->rss_table_dma_addr); req.hash_key_tbl_addr = rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr); - req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx); + req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t size; + + HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp); + + req.flags = rte_cpu_to_le_32( + HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT); + + req.enables = rte_cpu_to_le_32( + HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID); + + size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); + size -= RTE_PKTMBUF_HEADROOM; + + req.jumbo_thresh = rte_cpu_to_le_16(size); + req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, bool enable) +{ + int rc = 0; + struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, VNIC_TPA_CFG, -1, resp); + + if (enable) { + req.enables = rte_cpu_to_le_32( + HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS | + HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS | + HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN); + req.flags = rte_cpu_to_le_32( + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ); + req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.max_agg_segs = rte_cpu_to_le_16(5); + req.max_aggs = + rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX); + req.min_agg_len = rte_cpu_to_le_32(512); + } + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); + req.enables = rte_cpu_to_le_32( + HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); + 
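bnxt_hwrm_vnic_tpa_cfg() toggles hardware aggregation (TPA/LRO) per VNIC: with enable set it programs the aggregation limits (up to 5 segments, minimum aggregate length 512 bytes), and with enable clear it sends an empty request that turns TPA off. A usage sketch, assuming the default VNIC; bnxt_set_lro() is illustrative:

static int bnxt_set_lro(struct bnxt *bp, bool on)
{
	/* Operate on the default VNIC; per-queue VNICs would loop here */
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];

	return bnxt_hwrm_vnic_tpa_cfg(bp, vnic, on);
}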
memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr)); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); + + HWRM_PREP(req, FUNC_CFG, -1, resp); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + bp->pf.vf_info[vf].random_mac = false; + + return rc; +} + +int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid, + uint64_t *dropped) +{ + int rc = 0; + struct hwrm_func_qstats_input req = {.req_type = 0}; + struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, FUNC_QSTATS, -1, resp); + + req.fid = rte_cpu_to_le_16(fid); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + if (dropped) + *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts); + + return rc; +} + +int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, + struct rte_eth_stats *stats) +{ + int rc = 0; + struct hwrm_func_qstats_input req = {.req_type = 0}; + struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, FUNC_QSTATS, -1, resp); + + req.fid = rte_cpu_to_le_16(fid); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts); + stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts); + stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts); + stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes); + stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes); + stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes); + + stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts); + stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts); + stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts); + stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes); + stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes); + stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes); + + stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts); + stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts); + + stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts); + + return rc; +} + +int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid) +{ + int rc = 0; + struct hwrm_func_clr_stats_input req = {.req_type = 0}; + struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, FUNC_CLR_STATS, -1, resp); + + req.fid = rte_cpu_to_le_16(fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -992,14 +1488,20 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp) struct bnxt_cp_ring_info *cpr; for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { - unsigned int idx = i + 1; if (i >= bp->rx_cp_nr_rings) cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring; else cpr = bp->rx_queues[i]->cp_ring; if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) { - rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx); + rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i); + cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE; + /* + * TODO. Need a better way to reset grp_info.stats_ctx + * for Rx rings only. stats_ctx is not saved for Tx + * in grp_info. 
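FUNC_QSTATS reports counters split by cast type, so the mapping above sums unicast, multicast and broadcast into the rte_eth_stats totals, with the firmware drop counters feeding imissed and the error counters feeding ierrors/oerrors. Collecting per-VF statistics might look like the following sketch; collect_vf_stats() is illustrative:

static int collect_vf_stats(struct bnxt *bp, uint16_t vf,
			    struct rte_eth_stats *stats, uint64_t *tx_drops)
{
	/* fid of the child function, as set up in bnxt_hwrm_func_qcaps() */
	uint16_t fid = bp->pf.vf_info[vf].fid;
	int rc;

	rc = bnxt_hwrm_func_qstats(bp, fid, stats);
	if (rc)
		return rc;

	return bnxt_hwrm_func_qstats_tx_drop(bp, fid, tx_drops);
}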
+ */ + bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; if (rc) return rc; } @@ -1016,7 +1518,6 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp) struct bnxt_tx_queue *txq; struct bnxt_rx_queue *rxq; struct bnxt_cp_ring_info *cpr; - unsigned int idx = i + 1; if (i >= bp->rx_cp_nr_rings) { txq = bp->tx_queues[i - bp->rx_cp_nr_rings]; @@ -1026,7 +1527,7 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp) cpr = rxq->cp_ring; } - rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx); + rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i); if (rc) return rc; @@ -1036,11 +1537,10 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp) int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) { - uint16_t i; + uint16_t idx; uint32_t rc = 0; - for (i = 0; i < bp->rx_cp_nr_rings; i++) { - unsigned int idx = i + 1; + for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) { if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) { RTE_LOG(ERR, PMD, @@ -1057,13 +1557,13 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) return rc; } -static void bnxt_free_cp_ring(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, unsigned int idx) +static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + unsigned int idx __rte_unused) { struct bnxt_ring *cp_ring = cpr->cp_ring_struct; bnxt_hwrm_ring_free(bp, cp_ring, - HWRM_RING_FREE_INPUT_RING_TYPE_CMPL); + HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL); cp_ring->fw_ring_id = INVALID_HW_RING_ID; bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID; memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * @@ -1096,8 +1596,10 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp) txr->tx_prod = 0; txr->tx_cons = 0; } - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { bnxt_free_cp_ring(bp, cpr, idx); + cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; + } } for (i = 0; i < bp->rx_cp_nr_rings; i++) { @@ -1119,17 +1621,26 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp) rxr->rx_ring_struct->ring_size * sizeof(*rxr->rx_buf_ring)); rxr->rx_prod = 0; + memset(rxr->ag_buf_ring, 0, + rxr->ag_ring_struct->ring_size * + sizeof(*rxr->ag_buf_ring)); + rxr->ag_prod = 0; } - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { bnxt_free_cp_ring(bp, cpr, idx); + bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; + cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; + } } /* Default completion ring */ { struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { bnxt_free_cp_ring(bp, cpr, 0); + cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; + } } return rc; @@ -1141,14 +1652,7 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp) uint32_t rc = 0; for (i = 0; i < bp->rx_cp_nr_rings; i++) { - unsigned int idx = i + 1; - - if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID || - bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID) - continue; - - rc = bnxt_hwrm_ring_grp_alloc(bp, idx); - + rc = bnxt_hwrm_ring_grp_alloc(bp, i); if (rc) return rc; } @@ -1159,8 +1663,11 @@ void bnxt_free_hwrm_resources(struct bnxt *bp) { /* Release memzone */ rte_free(bp->hwrm_cmd_resp_addr); + rte_free(bp->hwrm_short_cmd_req_addr); bp->hwrm_cmd_resp_addr = NULL; + bp->hwrm_short_cmd_req_addr = NULL; bp->hwrm_cmd_resp_dma_addr = 0; + bp->hwrm_short_cmd_req_dma_addr = 0; } int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@ -1170,13 +1677,18 @@ int 
bnxt_alloc_hwrm_resources(struct bnxt *bp) sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); - bp->max_req_len = HWRM_MAX_REQ_LEN; bp->max_resp_len = HWRM_MAX_RESP_LEN; bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0); + rte_mem_lock_page(bp->hwrm_cmd_resp_addr); if (bp->hwrm_cmd_resp_addr == NULL) return -ENOMEM; bp->hwrm_cmd_resp_dma_addr = - rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr); + rte_mem_virt2phy(bp->hwrm_cmd_resp_addr); + if (bp->hwrm_cmd_resp_dma_addr == 0) { + RTE_LOG(ERR, PMD, + "unable to map response address to physical memory\n"); + return -ENOMEM; + } rte_spinlock_init(&bp->hwrm_lock); return 0; @@ -1201,13 +1713,25 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; STAILQ_FOREACH(filter, &vnic->filter, next) { - rc = bnxt_hwrm_set_filter(bp, vnic, filter); + rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter); if (rc) break; } return rc; } +void bnxt_free_tunnel_ports(struct bnxt *bp) +{ + if (bp->vxlan_port_cnt) + bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id, + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN); + bp->vxlan_port = 0; + if (bp->geneve_port_cnt) + bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id, + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE); + bp->geneve_port = 0; +} + void bnxt_free_all_hwrm_resources(struct bnxt *bp) { struct bnxt_vnic_info *vnic; @@ -1217,7 +1741,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp) return; vnic = &bp->vnic_info[0]; - bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic); + if (BNXT_PF(bp)) + bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic); /* VNIC resources */ for (i = 0; i < bp->nr_vnics; i++) { @@ -1226,12 +1751,16 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp) bnxt_clear_hwrm_vnic_filters(bp, vnic); bnxt_hwrm_vnic_ctx_free(bp, vnic); + + bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false); + bnxt_hwrm_vnic_free(bp, vnic); } /* Ring resources */ bnxt_free_all_hwrm_rings(bp); bnxt_free_all_hwrm_ring_grps(bp); bnxt_free_all_hwrm_stat_ctxs(bp); + bnxt_free_tunnel_ports(bp); } static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) @@ -1337,12 +1866,16 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id) return 0; } -static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed) +static uint16_t +bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed) { uint16_t ret = 0; - if (link_speed == ETH_LINK_SPEED_AUTONEG) + if (link_speed == ETH_LINK_SPEED_AUTONEG) { + if (bp->link_info.support_speeds) + return bp->link_info.support_speeds; link_speed = BNXT_SUPPORTED_SPEEDS; + } if (link_speed & ETH_LINK_SPEED_100M) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB; @@ -1434,7 +1967,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link) "Get link config failed with rc %d\n", rc); goto exit; } - if (link_info->link_up) + if (link_info->link_speed) link->link_speed = bnxt_parse_hw_link_speed(link_info->link_speed); else @@ -1443,7 +1976,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link) link->link_status = link_info->link_up; link->link_autoneg = link_info->auto_mode == HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ? 
- ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG; + ETH_LINK_FIXED : ETH_LINK_AUTONEG; exit: return rc; } @@ -1476,7 +2009,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK; link_req.auto_link_speed_mask = - bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds); + bnxt_parse_eth_link_speed_mask(bp, + dev_conf->link_speeds); } else { link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE; link_req.link_speed = speed; @@ -1493,7 +2027,6 @@ port_phy_cfg: "Set link config failed with rc %d\n", rc); } - rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); error: return rc; } @@ -1512,12 +2045,8 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) HWRM_CHECK_RESULT; - if (BNXT_VF(bp)) { - struct bnxt_vf_info *vf = &bp->vf; - - /* Hard Coded.. 0xfff VLAN ID mask */ - vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff; - } + /* Hard Coded.. 0xfff VLAN ID mask */ + bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff; switch (resp->port_partition_type) { case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: @@ -1532,3 +2061,946 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) return rc; } + +static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg, + struct hwrm_func_qcaps_output *qcaps) +{ + qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs; + memcpy(qcaps->mac_address, fcfg->dflt_mac_addr, + sizeof(qcaps->mac_address)); + qcaps->max_l2_ctxs = fcfg->num_l2_ctxs; + qcaps->max_rx_rings = fcfg->num_rx_rings; + qcaps->max_tx_rings = fcfg->num_tx_rings; + qcaps->max_cmpl_rings = fcfg->num_cmpl_rings; + qcaps->max_stat_ctx = fcfg->num_stat_ctxs; + qcaps->max_vfs = 0; + qcaps->first_vf_id = 0; + qcaps->max_vnics = fcfg->num_vnics; + qcaps->max_decap_records = 0; + qcaps->max_encap_records = 0; + qcaps->max_tx_wm_flows = 0; + qcaps->max_tx_em_flows = 0; + qcaps->max_rx_wm_flows = 0; + qcaps->max_rx_em_flows = 0; + qcaps->max_flow_id = 0; + qcaps->max_mcast_filters = fcfg->num_mcast_filters; + qcaps->max_sp_tx_rings = 0; + qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps; +} + +static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + HWRM_FUNC_CFG_INPUT_ENABLES_MRU | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); + req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); + req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); + req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + + ETHER_CRC_LEN + VLAN_TAG_SIZE); + req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx); + req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx); + req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings); + req.num_tx_rings = rte_cpu_to_le_16(tx_rings); + req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings); + req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx); + req.num_vnics = rte_cpu_to_le_16(bp->max_vnics); + req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps); + req.fid = rte_cpu_to_le_16(0xffff); + + HWRM_PREP(req, FUNC_CFG, -1, resp); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +static void 
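From the ethdev side, the two branches above mean: a link_speeds of 0 (ETH_LINK_SPEED_AUTONEG) selects auto mode, with the advertised mask now falling back to the PHY-reported support_speeds, while any fixed setting forces one speed. A sketch of both paths; configure_link() and the 25G choice are illustrative:

static int configure_link(struct rte_eth_dev *eth_dev, bool autoneg)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (autoneg)
		/* 0: advertise everything the PHY reports it supports */
		eth_dev->data->dev_conf.link_speeds = ETH_LINK_SPEED_AUTONEG;
	else
		/* Force a single speed, e.g. 25G */
		eth_dev->data->dev_conf.link_speeds =
			ETH_LINK_SPEED_25G | ETH_LINK_SPEED_FIXED;

	return bnxt_set_hwrm_link_config(bp, true);
}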
populate_vf_func_cfg_req(struct bnxt *bp, + struct hwrm_func_cfg_input *req, + int num_vfs) +{ + req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + HWRM_FUNC_CFG_INPUT_ENABLES_MRU | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); + + req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + + ETHER_CRC_LEN + VLAN_TAG_SIZE); + req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + + ETHER_CRC_LEN + VLAN_TAG_SIZE); + req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / + (num_vfs + 1)); + req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1)); + req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings / + (num_vfs + 1)); + req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1)); + req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1)); + req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1)); + /* TODO: For now, do not support VMDq/RFS on VFs. */ + req->num_vnics = rte_cpu_to_le_16(1); + req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps / + (num_vfs + 1)); +} + +static void add_random_mac_if_needed(struct bnxt *bp, + struct hwrm_func_cfg_input *cfg_req, + int vf) +{ + struct ether_addr mac; + + if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac)) + return; + + if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) { + cfg_req->enables |= + rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); + eth_random_addr(cfg_req->dflt_mac_addr); + bp->pf.vf_info[vf].random_mac = true; + } else { + memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN); + } +} + +static void reserve_resources_from_vf(struct bnxt *bp, + struct hwrm_func_cfg_input *cfg_req, + int vf) +{ + struct hwrm_func_qcaps_input req = {0}; + struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + /* Get the actual allocated values now */ + HWRM_PREP(req, FUNC_QCAPS, -1, resp); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + if (rc) { + RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc); + copy_func_cfg_to_qcaps(cfg_req, resp); + } else if (resp->error_code) { + rc = rte_le_to_cpu_16(resp->error_code); + RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc); + copy_func_cfg_to_qcaps(cfg_req, resp); + } + + bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx); + bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx); + bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings); + bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings); + bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings); + bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs); + /* + * TODO: While not supporting VMDq with VFs, max_vnics is always + * forced to 1 in this case + */ + //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics); + bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps); +} + +int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + /* Check for zero MAC address */ + HWRM_PREP(req, FUNC_QCFG, -1, resp); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); + rc = 
bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	if (rc) {
+		RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+		return -1;
+	} else if (resp->error_code) {
+		rc = rte_le_to_cpu_16(resp->error_code);
+		RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+		return -1;
+	}
+	return rte_le_to_cpu_16(resp->vlan);
+}
+
+static int update_pf_resource_max(struct bnxt *bp)
+{
+	struct hwrm_func_qcfg_input req = {0};
+	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	int rc;
+
+	/* And copy the allocated numbers into the pf struct */
+	HWRM_PREP(req, FUNC_QCFG, -1, resp);
+	req.fid = rte_cpu_to_le_16(0xffff);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT;
+
+	/* Only TX ring value reflects actual allocation? TODO */
+	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+	bp->pf.evb_mode = resp->evb_mode;
+
+	return rc;
+}
+
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
+{
+	int rc;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
+		return -1;
+	}
+
+	rc = bnxt_hwrm_func_qcaps(bp);
+	if (rc)
+		return rc;
+
+	bp->pf.func_cfg_flags &=
+		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+	bp->pf.func_cfg_flags |=
+		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
+	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+	return rc;
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	int i;
+	size_t sz;
+	int rc = 0;
+	size_t req_buf_sz;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
+		return -1;
+	}
+
+	rc = bnxt_hwrm_func_qcaps(bp);
+
+	if (rc)
+		return rc;
+
+	bp->pf.active_vfs = num_vfs;
+
+	/*
+	 * First, configure the PF to only use one TX ring. This ensures that
+	 * there are enough rings for all VFs.
+	 *
+	 * If we don't do this, when we call func_alloc() later, we will lock
+	 * extra rings to the PF that won't be available during func_cfg() of
+	 * the VFs.
+	 *
+	 * This has been fixed with firmware versions above 20.6.54.
+	 */
+	bp->pf.func_cfg_flags &=
+		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+	bp->pf.func_cfg_flags |=
+		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
+	if (rc)
+		return rc;
+
+	/*
+	 * Now, create and register a buffer to hold forwarded VF requests
+	 */
+	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
+	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
+		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
+	if (bp->pf.vf_req_buf == NULL) {
+		rc = -ENOMEM;
+		goto error_free;
+	}
+	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
+		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
+	for (i = 0; i < num_vfs; i++)
+		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
+					(i * HWRM_MAX_REQ_LEN);
+
+	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	if (rc)
+		goto error_free;
+
+	populate_vf_func_cfg_req(bp, &req, num_vfs);
+
+	bp->pf.active_vfs = 0;
+	for (i = 0; i < num_vfs; i++) {
+		add_random_mac_if_needed(bp, &req, i);
+
+		HWRM_PREP(req, FUNC_CFG, -1, resp);
+		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
+		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
+		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+		/* Clear enable flag for next pass */
+		req.enables &= ~rte_cpu_to_le_32(
+				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+
+		if (rc || resp->error_code) {
+			RTE_LOG(ERR, PMD,
+				"Failed to initialize VF %d\n", i);
+			RTE_LOG(ERR, PMD,
+				"Not all VFs available. (%d, %d)\n",
+				rc, resp->error_code);
+			break;
+		}
+
+		reserve_resources_from_vf(bp, &req, i);
+		bp->pf.active_vfs++;
+		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
+	}
+
+	/*
+	 * Now configure the PF to use "the rest" of the resources.
+	 * We're using STD_TX_RING_MODE here, which will limit the TX
+	 * rings. This will allow QoS to function properly. Not setting this
+	 * will cause PF rings to break bandwidth settings.
+ */ + rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings); + if (rc) + goto error_free; + + rc = update_pf_resource_max(bp); + if (rc) + goto error_free; + + return rc; + +error_free: + bnxt_hwrm_func_buf_unrgtr(bp); + return rc; +} + +int bnxt_hwrm_pf_evb_mode(struct bnxt *bp) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + HWRM_PREP(req, FUNC_CFG, -1, resp); + + req.fid = rte_cpu_to_le_16(0xffff); + req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE); + req.evb_mode = bp->pf.evb_mode; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, + uint8_t tunnel_type) +{ + struct hwrm_tunnel_dst_port_alloc_input req = {0}; + struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp); + req.tunnel_type = tunnel_type; + req.tunnel_dst_port_val = port; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + switch (tunnel_type) { + case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN: + bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->vxlan_port = port; + break; + case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE: + bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->geneve_port = port; + break; + default: + break; + } + return rc; +} + +int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port, + uint8_t tunnel_type) +{ + struct hwrm_tunnel_dst_port_free_input req = {0}; + struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp); + req.tunnel_type = tunnel_type; + req.tunnel_dst_port_id = rte_cpu_to_be_16(port); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf, + uint32_t flags) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int rc; + + HWRM_PREP(req, FUNC_CFG, -1, resp); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); + req.flags = rte_cpu_to_le_32(flags); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp) +{ + uint32_t *flag = flagp; + + vnic->flags = *flag; +} + +int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); +} + +int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_buf_rgtr_input req = {.req_type = 0 }; + struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp); + + req.req_buf_num_pages = rte_cpu_to_le_16(1); + req.req_buf_page_size = rte_cpu_to_le_16( + page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN)); + req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); + req.req_buf_page_addr[0] = + rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf)); + if (req.req_buf_page_addr[0] == 0) { + RTE_LOG(ERR, PMD, + "unable to map buffer address to physical memory\n"); + return -ENOMEM; + } + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 }; + struct 
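Taken together, the SR-IOV helpers above give a PF driver a two-step provisioning flow: shrink the PF to a single TX ring, configure and account each VF, then return the leftover resources to the PF. A usage sketch; provision_vfs() is illustrative:

static int provision_vfs(struct bnxt *bp, int requested_vfs)
{
	if (requested_vfs == 0)
		return bnxt_hwrm_allocate_pf_only(bp);

	/*
	 * Shrinks the PF to one TX ring, configures each VF, then hands
	 * the remaining resources back to the PF.
	 */
	return bnxt_hwrm_allocate_vfs(bp, requested_vfs);
}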
hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int rc; + + HWRM_PREP(req, FUNC_CFG, -1, resp); + req.fid = rte_cpu_to_le_16(0xffff); + req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); + req.enables = rte_cpu_to_le_32( + HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = rte_cpu_to_le_16( + bp->def_cp_ring->cp_ring_struct->fw_ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp) +{ + struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_vf_cfg_input req = {0}; + int rc; + + HWRM_PREP(req, FUNC_VF_CFG, -1, resp); + req.enables = rte_cpu_to_le_32( + HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = rte_cpu_to_le_16( + bp->def_cp_ring->cp_ring_struct->fw_ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t dflt_vlan, fid; + uint32_t func_cfg_flags; + int rc = 0; + + HWRM_PREP(req, FUNC_CFG, -1, resp); + + if (is_vf) { + dflt_vlan = bp->pf.vf_info[vf].dflt_vlan; + fid = bp->pf.vf_info[vf].fid; + func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags; + } else { + fid = rte_cpu_to_le_16(0xffff); + func_cfg_flags = bp->pf.func_cfg_flags; + dflt_vlan = bp->vlan; + } + + req.flags = rte_cpu_to_le_32(func_cfg_flags); + req.fid = rte_cpu_to_le_16(fid); + req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN); + req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf, + uint16_t max_bw, uint16_t enables) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int rc; + + HWRM_PREP(req, FUNC_CFG, -1, resp); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); + req.enables |= rte_cpu_to_le_32(enables); + req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); + req.max_bw = rte_cpu_to_le_32(max_bw); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(req, FUNC_CFG, -1, resp); + req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); + req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN); + req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, + void *encaped, size_t ec_size) +{ + int rc = 0; + struct hwrm_reject_fwd_resp_input req = {.req_type = 0}; + struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + if (ec_size > sizeof(req.encap_request)) + return -1; + + HWRM_PREP(req, 
REJECT_FWD_RESP, -1, resp); + + req.encap_resp_target_id = rte_cpu_to_le_16(target_id); + memcpy(req.encap_request, encaped, ec_size); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf, + struct ether_addr *mac) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + HWRM_PREP(req, FUNC_QCFG, -1, resp); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN); + return rc; +} + +int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, + void *encaped, size_t ec_size) +{ + int rc = 0; + struct hwrm_exec_fwd_resp_input req = {.req_type = 0}; + struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + if (ec_size > sizeof(req.encap_request)) + return -1; + + HWRM_PREP(req, EXEC_FWD_RESP, -1, resp); + + req.encap_resp_target_id = rte_cpu_to_le_16(target_id); + memcpy(req.encap_request, encaped, ec_size); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, + struct rte_eth_stats *stats) +{ + int rc = 0; + struct hwrm_stat_ctx_query_input req = {.req_type = 0}; + struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, STAT_CTX_QUERY, -1, resp); + + req.stat_ctx_id = rte_cpu_to_le_32(cid); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts); + stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts); + stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts); + stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes); + stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes); + stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes); + + stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts); + stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts); + stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts); + stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes); + stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes); + stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes); + + stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts); + stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts); + stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts); + + return rc; +} + +int bnxt_hwrm_port_qstats(struct bnxt *bp) +{ + struct hwrm_port_qstats_input req = {0}; + struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + return 0; + + HWRM_PREP(req, PORT_QSTATS, -1, resp); + req.port_id = rte_cpu_to_le_16(pf->port_id); + req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map); + req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + return rc; +} + +int bnxt_hwrm_port_clr_stats(struct bnxt *bp) +{ + struct hwrm_port_clr_stats_input req = {0}; + struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + return 0; + + HWRM_PREP(req, PORT_CLR_STATS, 
-1, resp);
+	req.port_id = rte_cpu_to_le_16(pf->port_id);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT;
+	return rc;
+}
+
+int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_port_led_qcaps_input req = {0};
+	int rc;
+
+	if (BNXT_VF(bp))
+		return 0;
+
+	HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
+	req.port_id = bp->pf.port_id;
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT;
+
+	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+		unsigned int i;
+
+		bp->num_leds = resp->num_leds;
+		memcpy(bp->leds, &resp->led0_id,
+			sizeof(bp->leds[0]) * bp->num_leds);
+		for (i = 0; i < bp->num_leds; i++) {
+			struct bnxt_led_info *led = &bp->leds[i];
+
+			uint16_t caps = led->led_state_caps;
+
+			if (!led->led_group_id ||
+			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
+				bp->num_leds = 0;
+				break;
+			}
+		}
+	}
+	return rc;
+}
+
+int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
+{
+	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_port_led_cfg_input req = {0};
+	struct bnxt_led_cfg *led_cfg;
+	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
+	uint16_t duration = 0;
+	int rc, i;
+
+	if (!bp->num_leds || BNXT_VF(bp))
+		return -EOPNOTSUPP;
+
+	HWRM_PREP(req, PORT_LED_CFG, -1, resp);
+	if (led_on) {
+		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
+		duration = rte_cpu_to_le_16(500);
+	}
+	req.port_id = bp->pf.port_id;
+	req.num_leds = bp->num_leds;
+	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+		req.enables |= BNXT_LED_DFLT_ENABLES(i);
+		led_cfg->led_id = bp->leds[i].led_id;
+		led_cfg->led_state = led_state;
+		led_cfg->led_blink_on = duration;
+		led_cfg->led_blink_off = duration;
+		led_cfg->led_group_id = bp->leds[i].led_group_id;
+	}
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT;
+
+	return rc;
+}
+
+static void
+bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
+{
+	uint32_t *count = cbdata;
+
+	*count = *count + 1;
+}
+
+static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
+				     struct bnxt_vnic_info *vnic __rte_unused)
+{
+	return 0;
+}
+
+int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
+{
+	uint32_t count = 0;
+
+	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
+			&count, bnxt_vnic_count_hwrm_stub);
+
+	return count;
+}
+
+static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
+					uint16_t *vnic_ids)
+{
+	struct hwrm_func_vf_vnic_ids_query_input req = {0};
+	struct hwrm_func_vf_vnic_ids_query_output *resp =
+						bp->hwrm_cmd_resp_addr;
+	int rc;
+
+	/* First query all VNIC ids */
+	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
+
+	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+
+	if (req.vnic_id_tbl_addr == 0) {
+		RTE_LOG(ERR, PMD,
+		"unable to map VNIC ID table address to physical memory\n");
+		return -ENOMEM;
+	}
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	if (rc) {
+		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+		return -1;
+	} else if (resp->error_code) {
+		rc = rte_le_to_cpu_16(resp->error_code);
+		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+		return -1;
+	}
+
+	return rte_le_to_cpu_32(resp->vnic_id_cnt);
+}
+
+/*
+ * This function queries the VNIC IDs for a specified
VF. It then calls + * the vnic_cb to update the necessary field in vnic_info with cbdata. + * Then it calls the hwrm_cb function to program this new vnic configuration. + */ +int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf, + void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata, + int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic)) +{ + struct bnxt_vnic_info vnic; + int rc = 0; + int i, num_vnic_ids; + uint16_t *vnic_ids; + size_t vnic_id_sz; + size_t sz; + + /* First query all VNIC ids */ + vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids); + vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz, + RTE_CACHE_LINE_SIZE); + if (vnic_ids == NULL) { + rc = -ENOMEM; + return rc; + } + for (sz = 0; sz < vnic_id_sz; sz += getpagesize()) + rte_mem_lock_page(((char *)vnic_ids) + sz); + + num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids); + + if (num_vnic_ids < 0) + return num_vnic_ids; + + /* Retrieve each VNIC's config, apply vnic_cb to it, then program it via hwrm_cb */ + + for (i = 0; i < num_vnic_ids; i++) { + memset(&vnic, 0, sizeof(struct bnxt_vnic_info)); + vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]); + rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf); + if (rc) + break; + if (vnic.mru <= 4) /* Indicates unallocated */ + continue; + + vnic_cb(&vnic, cbdata); + + rc = hwrm_cb(bp, &vnic); + if (rc) + break; + } + + rte_free(vnic_ids); + + return rc; +} + +int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, + bool on) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int rc; + + HWRM_PREP(req, FUNC_CFG, -1, resp); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); + req.enables |= rte_cpu_to_le_32( + HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE); + req.vlan_antispoof_mode = on ? + HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN : + HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf) +{ + struct bnxt_vnic_info vnic; + uint16_t *vnic_ids; + size_t vnic_id_sz; + int num_vnic_ids, i; + size_t sz; + int rc; + + vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids); + vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz, + RTE_CACHE_LINE_SIZE); + if (vnic_ids == NULL) { + rc = -ENOMEM; + return rc; + } + + for (sz = 0; sz < vnic_id_sz; sz += getpagesize()) + rte_mem_lock_page(((char *)vnic_ids) + sz); + + rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids); + if (rc <= 0) + goto exit; + num_vnic_ids = rc; + + /* + * Loop through to find the default VNIC ID. + * TODO: The easier way would be to obtain the resp->dflt_vnic_id + * by sending the hwrm_func_qcfg command to the firmware. + */ + for (i = 0; i < num_vnic_ids; i++) { + memset(&vnic, 0, sizeof(struct bnxt_vnic_info)); + vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]); + rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, + bp->pf.first_vf_id + vf); + if (rc) + goto exit; + if (vnic.func_default) { + rte_free(vnic_ids); + return vnic.fw_vnic_id; + } + } + /* Could not find a default VNIC. 
*/ + RTE_LOG(ERR, PMD, "No default VNIC\n"); +exit: + rte_free(vnic_ids); + return -1; +} diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h index 6519ef21..51cd0dd4 100644 --- a/drivers/net/bnxt/bnxt_hwrm.h +++ b/drivers/net/bnxt/bnxt_hwrm.h @@ -45,27 +45,42 @@ struct bnxt_cp_ring_info; int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic); -int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic, + uint16_t vlan_count, + struct bnxt_vlan_table_entry *vlan_table); +int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, + uint16_t vlan_count, + struct bnxt_vlan_antispoof_table_entry *vlan_table); int bnxt_hwrm_clear_filter(struct bnxt *bp, struct bnxt_filter_info *filter); int bnxt_hwrm_set_filter(struct bnxt *bp, - struct bnxt_vnic_info *vnic, + uint16_t dst_id, struct bnxt_filter_info *filter); +int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, + void *encaped, size_t ec_size); +int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, + void *encaped, size_t ec_size); -int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd); - -int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags, - uint32_t *vf_req_fwd); +int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp); +int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp); +int bnxt_hwrm_func_driver_register(struct bnxt *bp); int bnxt_hwrm_func_qcaps(struct bnxt *bp); int bnxt_hwrm_func_reset(struct bnxt *bp); int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags); +int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, + struct rte_eth_stats *stats); +int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid, + uint64_t *dropped); +int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid); +int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp); +int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp); int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); int bnxt_hwrm_ring_alloc(struct bnxt *bp, struct bnxt_ring *ring, uint32_t ring_type, uint32_t map_index, - uint32_t stats_ctx_id); + uint32_t stats_ctx_id, uint32_t cmpl_ring_id); int bnxt_hwrm_ring_free(struct bnxt *bp, struct bnxt_ring *ring, uint32_t ring_type); int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx); @@ -76,16 +91,24 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, unsigned int idx); int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, unsigned int idx); +int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, + struct rte_eth_stats *stats); int bnxt_hwrm_ver_get(struct bnxt *bp); int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int16_t fw_vf_id); int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, bool enable); int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp); int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp); @@ -101,5 +124,36 @@ int 
bnxt_alloc_hwrm_resources(struct bnxt *bp); int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link); int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up); int bnxt_hwrm_func_qcfg(struct bnxt *bp); - +int bnxt_hwrm_allocate_pf_only(struct bnxt *bp); +int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs); +int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, + const uint8_t *mac_addr); +int bnxt_hwrm_pf_evb_mode(struct bnxt *bp); +int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf, + uint16_t max_bw, uint16_t enables); +int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf); +int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf, + struct ether_addr *mac); +int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf); +int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, + uint8_t tunnel_type); +int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port, + uint8_t tunnel_type); +void bnxt_free_tunnel_ports(struct bnxt *bp); +int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf); +int bnxt_hwrm_port_qstats(struct bnxt *bp); +int bnxt_hwrm_port_clr_stats(struct bnxt *bp); +int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on); +int bnxt_hwrm_port_led_qcaps(struct bnxt *bp); +int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf, + uint32_t flags); +void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp); +int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf); +int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf, + void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata, + int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic)); +int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, + bool on); +int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf); #endif diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c index 20e17ff5..47cda7e5 100644 --- a/drivers/net/bnxt/bnxt_irq.c +++ b/drivers/net/bnxt/bnxt_irq.c @@ -66,16 +66,26 @@ static void bnxt_int_handler(void *param) /* Handle any async event */ bnxt_handle_async_event(bp, cmp); break; - case CMPL_BASE_TYPE_HWRM_FWD_RESP: + case CMPL_BASE_TYPE_HWRM_FWD_REQ: /* Handle HWRM forwarded responses */ bnxt_handle_fwd_req(bp, cmp); break; default: /* Ignore any other events */ + if (cmp->type & rte_cpu_to_le_16(0x01)) { + if (!CMP_VALID(cmp, raw_cons, + cpr->cp_ring_struct)) + goto no_more; + } + RTE_LOG(INFO, PMD, + "Ignoring %02x completion\n", CMP_TYPE(cmp)); break; } raw_cons = NEXT_RAW_CMP(raw_cons); - } + + }; +no_more: + cpr->cp_raw_cons = raw_cons; B_CP_DB_REARM(cpr, cpr->cp_raw_cons); } @@ -102,14 +112,15 @@ void bnxt_disable_int(struct bnxt *bp) struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; /* Only the default completion ring */ - B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + if (cpr != NULL && cpr->cp_doorbell != NULL) + B_CP_DB_DISARM(cpr); } void bnxt_enable_int(struct bnxt *bp) { struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - B_CP_DB_REARM(cpr, cpr->cp_raw_cons); + B_CP_DB_ARM(cpr); } int bnxt_setup_int(struct bnxt *bp) @@ -126,7 +137,7 @@ int bnxt_setup_int(struct bnxt *bp) for (i = 0; i < total_vecs; i++) { bp->irq_tbl[i].vector = i; snprintf(bp->irq_tbl[i].name, len, - "%s-%d", bp->eth_dev->data->name, i); + "%s-%d", bp->eth_dev->device->name, i); bp->irq_tbl[i].handler = bnxt_int_handler; } } else { @@ -136,7 +147,7 @@ int bnxt_setup_int(struct bnxt *bp) return 0; 
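/*
 * A sketch of the doorbell convention assumed by the code above:
 * B_CP_DB_ARM writes the completion-ring doorbell key to re-enable
 * interrupt generation, and B_CP_DB_DISARM writes the same key with the
 * IRQ-disable bit set; neither advances the consumer index, which is why
 * bnxt_int_handler() finishes with B_CP_DB_REARM(cpr, cpr->cp_raw_cons)
 * to acknowledge the consumed completions and re-arm in a single write.
 * The NULL checks added to bnxt_disable_int() guard teardown paths where
 * the default completion ring or its doorbell was never mapped.
 */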
setup_exit: - RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed"); + RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n"); return rc; } diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c index 0fafa13f..9d0ae277 100644 --- a/drivers/net/bnxt/bnxt_ring.c +++ b/drivers/net/bnxt/bnxt_ring.c @@ -31,7 +31,9 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include <rte_bitmap.h> #include <rte_memzone.h> +#include <unistd.h> #include "bnxt.h" #include "bnxt_cpr.h" @@ -96,6 +98,8 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, struct rte_pci_device *pdev = bp->pdev; const struct rte_memzone *mz = NULL; char mz_name[RTE_MEMZONE_NAMESIZE]; + phys_addr_t mz_phys_addr; + int sz; int stats_len = (tx_ring_info || rx_ring_info) ? RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0; @@ -112,8 +116,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, int rx_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(rx_ring_info-> rx_ring_struct->vmem_size) : 0; + int ag_vmem_start = 0; + int ag_vmem_len = 0; + int cp_ring_start = 0; + + ag_vmem_start = rx_vmem_start + rx_vmem_len; + ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP( + rx_ring_info->ag_ring_struct->vmem_size) : 0; + cp_ring_start = ag_vmem_start + ag_vmem_len; - int cp_ring_start = rx_vmem_start + rx_vmem_len; int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size * sizeof(struct cmpl_base)); @@ -127,7 +138,23 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size * sizeof(struct rx_prod_pkt_bd)) : 0; - int total_alloc_len = rx_ring_start + rx_ring_len; + int ag_ring_start = rx_ring_start + rx_ring_len; + int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR; + + int ag_bitmap_start = ag_ring_start + ag_ring_len; + int ag_bitmap_len = rx_ring_info ? + RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint( + rx_ring_info->rx_ring_struct->ring_size * + AGG_RING_SIZE_FACTOR)) : 0; + + int tpa_info_start = ag_bitmap_start + ag_bitmap_len; + int tpa_info_len = rx_ring_info ? 
+ RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX * + sizeof(struct bnxt_tpa_info)) : 0; + + int total_alloc_len = tpa_info_start; + if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + total_alloc_len += tpa_info_len; snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain, @@ -136,21 +163,37 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; mz = rte_memzone_lookup(mz_name); if (!mz) { - mz = rte_memzone_reserve(mz_name, total_alloc_len, + mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len, SOCKET_ID_ANY, RTE_MEMZONE_2MB | - RTE_MEMZONE_SIZE_HINT_ONLY); + RTE_MEMZONE_SIZE_HINT_ONLY, + getpagesize()); if (mz == NULL) return -ENOMEM; } memset(mz->addr, 0, mz->len); + mz_phys_addr = mz->phys_addr; + if ((unsigned long)mz->addr == mz_phys_addr) { + RTE_LOG(WARNING, PMD, + "Memzone physical address same as virtual.\n"); + RTE_LOG(WARNING, PMD, + "Using rte_mem_virt2phy()\n"); + for (sz = 0; sz < total_alloc_len; sz += getpagesize()) + rte_mem_lock_page(((char *)mz->addr) + sz); + mz_phys_addr = rte_mem_virt2phy(mz->addr); + if (mz_phys_addr == 0) { + RTE_LOG(ERR, PMD, + "unable to map ring address to physical memory\n"); + return -ENOMEM; + } + } if (tx_ring_info) { tx_ring = tx_ring_info->tx_ring_struct; tx_ring->bd = ((char *)mz->addr + tx_ring_start); tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd; - tx_ring->bd_dma = mz->phys_addr + tx_ring_start; + tx_ring->bd_dma = mz_phys_addr + tx_ring_start; tx_ring_info->tx_desc_mapping = tx_ring->bd_dma; tx_ring->mem_zone = (const void *)mz; @@ -170,7 +213,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, rx_ring->bd = ((char *)mz->addr + rx_ring_start); rx_ring_info->rx_desc_ring = (struct rx_prod_pkt_bd *)rx_ring->bd; - rx_ring->bd_dma = mz->phys_addr + rx_ring_start; + rx_ring->bd_dma = mz_phys_addr + rx_ring_start; rx_ring_info->rx_desc_mapping = rx_ring->bd_dma; rx_ring->mem_zone = (const void *)mz; @@ -182,10 +225,39 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, rx_ring_info->rx_buf_ring = (struct bnxt_sw_rx_bd *)rx_ring->vmem; } + + rx_ring = rx_ring_info->ag_ring_struct; + + rx_ring->bd = ((char *)mz->addr + ag_ring_start); + rx_ring_info->ag_desc_ring = + (struct rx_prod_pkt_bd *)rx_ring->bd; + rx_ring->bd_dma = mz->phys_addr + ag_ring_start; + rx_ring_info->ag_desc_mapping = rx_ring->bd_dma; + rx_ring->mem_zone = (const void *)mz; + + if (!rx_ring->bd) + return -ENOMEM; + if (rx_ring->vmem_size) { + rx_ring->vmem = + (void **)((char *)mz->addr + ag_vmem_start); + rx_ring_info->ag_buf_ring = + (struct bnxt_sw_rx_bd *)rx_ring->vmem; + } + + rx_ring_info->ag_bitmap = + rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size * + AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr + + ag_bitmap_start, ag_bitmap_len); + + /* TPA info */ + if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + rx_ring_info->tpa_info = + ((struct bnxt_tpa_info *)((char *)mz->addr + + tpa_info_start)); } cp_ring->bd = ((char *)mz->addr + cp_ring_start); - cp_ring->bd_dma = mz->phys_addr + cp_ring_start; + cp_ring->bd_dma = mz_phys_addr + cp_ring_start; cp_ring_info->cp_desc_ring = cp_ring->bd; cp_ring_info->cp_desc_mapping = cp_ring->bd_dma; cp_ring->mem_zone = (const void *)mz; @@ -196,7 +268,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, *cp_ring->vmem = ((char *)mz->addr + stats_len); if (stats_len) { cp_ring_info->hw_stats = mz->addr; - cp_ring_info->hw_stats_map = mz->phys_addr; + cp_ring_info->hw_stats_map = mz_phys_addr; } 
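/*
 * Rough map of the single memzone carved up by bnxt_alloc_rings(); each
 * region starts at the corresponding *_start offset computed at the top
 * of the function, every length is cache-line rounded, and the trailing
 * TPA array is only included when LRO (rxmode.enable_lro) is configured:
 *
 *   [hw stats][tx vmem][rx vmem][ag vmem][cp ring BDs]
 *   [tx ring BDs][rx ring BDs][ag ring BDs][ag bitmap][tpa_info]
 */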
cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE; return 0; @@ -213,21 +285,6 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) unsigned int i; int rc = 0; - /* Default completion ring */ - { - struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - struct bnxt_ring *cp_ring = cpr->cp_ring_struct; - - rc = bnxt_hwrm_ring_alloc(bp, cp_ring, - HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL, - 0, HWRM_NA_SIGNATURE); - if (rc) - goto err_out; - cpr->cp_doorbell = pci_dev->mem_resource[2].addr; - B_CP_DIS_DB(cpr, cpr->cp_raw_cons); - bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id; - } - for (i = 0; i < bp->rx_cp_nr_rings; i++) { struct bnxt_rx_queue *rxq = bp->rx_queues[i]; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; @@ -235,35 +292,64 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) struct bnxt_rx_ring_info *rxr = rxq->rx_ring; struct bnxt_ring *ring = rxr->rx_ring_struct; unsigned int idx = i + 1; + unsigned int map_idx = idx + bp->rx_cp_nr_rings; + + bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; /* Rx cmpl */ rc = bnxt_hwrm_ring_alloc(bp, cp_ring, - HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL, - idx, HWRM_NA_SIGNATURE); + HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, + idx, HWRM_NA_SIGNATURE, + HWRM_NA_SIGNATURE); if (rc) goto err_out; cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr + idx * 0x80; - bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id; + bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); /* Rx ring */ rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, - idx, cpr->hw_stats_ctx_id); + idx, cpr->hw_stats_ctx_id, + cp_ring->fw_ring_id); if (rc) goto err_out; rxr->rx_prod = 0; rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr + idx * 0x80; - bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id; + bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + + ring = rxr->ag_ring_struct; + /* Agg ring */ + if (ring == NULL) + RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n"); + + rc = bnxt_hwrm_ring_alloc(bp, ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, + map_idx, HWRM_NA_SIGNATURE, + cp_ring->fw_ring_id); + if (rc) + goto err_out; + RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n"); + rxr->ag_prod = 0; + rxr->ag_doorbell = + (char *)pci_dev->mem_resource[2].addr + + map_idx * 0x80; + bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id; + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + + rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + + ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE); if (bnxt_init_one_rx_ring(rxq)) { - RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!"); + RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n"); bnxt_rx_queue_release_op(rxq); return -ENOMEM; } B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + rxq->index = idx; } for (i = 0; i < bp->tx_cp_nr_rings; i++) { @@ -272,29 +358,34 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) struct bnxt_ring *cp_ring = cpr->cp_ring_struct; struct bnxt_tx_ring_info *txr = txq->tx_ring; struct bnxt_ring *ring = txr->tx_ring_struct; - unsigned int idx = 1 + bp->rx_cp_nr_rings + i; + unsigned int idx = i + 1 + bp->rx_cp_nr_rings; + + /* Account for AGG Rings. 
AGG ring cnt = Rx Cmpl ring cnt */ + idx += bp->rx_cp_nr_rings; /* Tx cmpl */ rc = bnxt_hwrm_ring_alloc(bp, cp_ring, - HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL, - idx, HWRM_NA_SIGNATURE); + HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, + idx, HWRM_NA_SIGNATURE, + HWRM_NA_SIGNATURE); if (rc) goto err_out; cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr + idx * 0x80; - bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); /* Tx ring */ rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX, - idx, cpr->hw_stats_ctx_id); + idx, cpr->hw_stats_ctx_id, + cp_ring->fw_ring_id); if (rc) goto err_out; txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr + idx * 0x80; + txq->index = idx; } err_out: diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h index 8656549a..6d1eb588 100644 --- a/drivers/net/bnxt/bnxt_ring.h +++ b/drivers/net/bnxt/bnxt_ring.h @@ -57,7 +57,8 @@ #define DEFAULT_RX_RING_SIZE 256 #define DEFAULT_TX_RING_SIZE 256 -#define MAX_TPA 128 +#define BNXT_TPA_MAX 64 +#define AGG_RING_SIZE_FACTOR 2 /* These assume 4k pages */ #define MAX_RX_DESC_CNT (8 * 1024) @@ -65,6 +66,7 @@ #define MAX_CP_DESC_CNT (16 * 1024) #define INVALID_HW_RING_ID ((uint16_t)-1) +#define INVALID_STATS_CTX_ID ((uint16_t)-1) struct bnxt_ring { void *bd; diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c index cddf17d5..0793820b 100644 --- a/drivers/net/bnxt/bnxt_rxq.c +++ b/drivers/net/bnxt/bnxt_rxq.c @@ -76,6 +76,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp) rc = -ENOMEM; goto err_out; } + vnic->flags |= BNXT_VNIC_INFO_BCAST; STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next); bp->nr_vnics++; @@ -84,9 +85,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp) vnic->func_default = true; vnic->ff_pool_idx = 0; - vnic->start_grp_id = 1; - vnic->end_grp_id = vnic->start_grp_id + - bp->rx_cp_nr_rings - 1; + vnic->start_grp_id = 0; + vnic->end_grp_id = vnic->start_grp_id; filter = bnxt_alloc_filter(bp); if (!filter) { RTE_LOG(ERR, PMD, "L2 filter alloc failed\n"); @@ -121,13 +121,16 @@ int bnxt_mq_rx_configure(struct bnxt *bp) } /* For each pool, allocate MACVLAN CFA rule & VNIC */ if (!pools) { + pools = RTE_MIN(bp->max_vnics, + RTE_MIN(bp->max_l2_ctx, + RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS))); RTE_LOG(ERR, PMD, "VMDq pool not set, defaulted to 64\n"); pools = ETH_64_POOLS; } nb_q_per_grp = bp->rx_cp_nr_rings / pools; - start_grp_id = 1; - end_grp_id = start_grp_id + nb_q_per_grp - 1; + start_grp_id = 0; + end_grp_id = nb_q_per_grp; ring_idx = 0; for (i = 0; i < pools; i++) { @@ -138,6 +141,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp) rc = -ENOMEM; goto err_out; } + vnic->flags |= BNXT_VNIC_INFO_BCAST; STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next); bp->nr_vnics++; @@ -178,6 +182,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp) rc = -ENOMEM; goto err_out; } + vnic->flags |= BNXT_VNIC_INFO_BCAST; /* Partition the rx queues for the single pool */ for (i = 0; i < bp->rx_cp_nr_rings; i++) { rxq = bp->eth_dev->data->rx_queues[i]; @@ -188,9 +193,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp) vnic->func_default = true; vnic->ff_pool_idx = 0; - vnic->start_grp_id = 1; - vnic->end_grp_id = vnic->start_grp_id + - bp->rx_cp_nr_rings - 1; + vnic->start_grp_id = 0; + vnic->end_grp_id = bp->rx_cp_nr_rings; filter = bnxt_alloc_filter(bp); if (!filter) { RTE_LOG(ERR, PMD, "L2 filter alloc failed\n"); @@ -213,9 +217,10 @@ err_out: return rc; } -static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused) +static void 
bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) { struct bnxt_sw_rx_bd *sw_ring; + struct bnxt_tpa_info *tpa_info; uint16_t i; if (rxq) { @@ -228,6 +233,27 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused) } } } + /* Free up mbufs in Agg ring */ + sw_ring = rxq->rx_ring->ag_buf_ring; + if (sw_ring) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(sw_ring[i].mbuf); + sw_ring[i].mbuf = NULL; + } + } + } + + /* Free up mbufs in TPA */ + tpa_info = rxq->rx_ring->tpa_info; + if (tpa_info) { + for (i = 0; i < BNXT_TPA_MAX; i++) { + if (tpa_info[i].mbuf) { + rte_pktmbuf_free_seg(tpa_info[i].mbuf); + tpa_info[i].mbuf = NULL; + } + } + } } } @@ -251,6 +277,8 @@ void bnxt_rx_queue_release_op(void *rx_queue) /* Free RX ring hardware descriptors */ bnxt_free_ring(rxq->rx_ring->rx_ring_struct); + /* Free RX Agg ring hardware descriptors */ + bnxt_free_ring(rxq->rx_ring->ag_ring_struct); /* Free RX completion ring hardware descriptors */ bnxt_free_ring(rxq->cp_ring->cp_ring_struct); @@ -273,7 +301,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, int rc = 0; if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) { - RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc); + RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc); rc = -EINVAL; goto out; } @@ -286,7 +314,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue), RTE_CACHE_LINE_SIZE, socket_id); if (!rxq) { - RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!"); + RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n"); rc = -ENOMEM; goto out; } @@ -295,6 +323,9 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, rxq->nb_rx_desc = nb_desc; rxq->rx_free_thresh = rx_conf->rx_free_thresh; + RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size); + RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu); + rc = bnxt_init_rx_ring_struct(rxq, socket_id); if (rc) goto out; @@ -308,7 +339,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, /* Allocate RX ring hardware descriptors */ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring, "rxr")) { - RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!"); + RTE_LOG(ERR, PMD, + "ring_dma_zone_reserve for rx_ring failed!\n"); bnxt_rx_queue_release_op(rxq); rc = -ENOMEM; goto out; diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h index 95543298..01aaa007 100644 --- a/drivers/net/bnxt/bnxt_rxq.h +++ b/drivers/net/bnxt/bnxt_rxq.h @@ -52,12 +52,15 @@ struct bnxt_rx_queue { uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ struct bnxt *bp; + int index; struct bnxt_vnic_info *vnic; uint32_t rx_buf_size; uint32_t rx_buf_use_size; /* useable size */ struct bnxt_rx_ring_info *rx_ring; struct bnxt_cp_ring_info *cp_ring; + + struct bnxt_tpa_info *rx_tpa; }; void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq); diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c index 5d93de26..bee67d33 100644 --- a/drivers/net/bnxt/bnxt_rxr.c +++ b/drivers/net/bnxt/bnxt_rxr.c @@ -34,6 +34,7 @@ #include <inttypes.h> #include <stdbool.h> +#include <rte_bitmap.h> #include <rte_byteorder.h> #include <rte_malloc.h> #include <rte_memory.h> @@ -67,8 +68,37 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq, struct rte_mbuf *data; data = __bnxt_alloc_rx_data(rxq->mb_pool); - if (!data) + if (!data) { + rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); return -ENOMEM; + } + + 
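/*
 * Attach the fresh mbuf to the software ring slot and point the hardware
 * buffer descriptor at its DMA address.  bnxt_alloc_ag_data() below
 * repeats this pattern for the aggregation ring, which holds the overflow
 * segments of jumbo and LRO packets; the rx_mbuf_alloc_fail counter
 * bumped on failure is what bnxt_stats_get_op() later reports as
 * rx_nombuf.
 */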
rx_buf->mbuf = data; + + rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf)); + + return 0; +} + +static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq, + struct bnxt_rx_ring_info *rxr, + uint16_t prod) +{ + struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod]; + struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod]; + struct rte_mbuf *data; + + data = __bnxt_alloc_rx_data(rxq->mb_pool); + if (!data) { + rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + return -ENOMEM; + } + + if (rxbd == NULL) + RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n"); + if (rx_buf == NULL) + RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n"); + rx_buf->mbuf = data; @@ -77,24 +107,232 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq, return 0; } -static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons, +static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, struct rte_mbuf *mbuf) { - uint16_t prod = rxr->rx_prod; + uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod); struct bnxt_sw_rx_bd *prod_rx_buf; - struct rx_prod_pkt_bd *prod_bd, *cons_bd; + struct rx_prod_pkt_bd *prod_bd; prod_rx_buf = &rxr->rx_buf_ring[prod]; + RTE_ASSERT(prod_rx_buf->mbuf == NULL); + RTE_ASSERT(mbuf != NULL); + prod_rx_buf->mbuf = mbuf; prod_bd = &rxr->rx_desc_ring[prod]; - cons_bd = &rxr->rx_desc_ring[cons]; + + prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf)); + + rxr->rx_prod = prod; +} + +#ifdef BNXT_DEBUG +static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons, + struct rte_mbuf *mbuf) +{ + uint16_t prod = rxr->ag_prod; + struct bnxt_sw_rx_bd *prod_rx_buf; + struct rx_prod_pkt_bd *prod_bd, *cons_bd; + + prod_rx_buf = &rxr->ag_buf_ring[prod]; + + prod_rx_buf->mbuf = mbuf; + + prod_bd = &rxr->ag_desc_ring[prod]; + cons_bd = &rxr->ag_desc_ring[cons]; prod_bd->addr = cons_bd->addr; } +#endif + +static inline +struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr, + uint16_t cons) +{ + struct bnxt_sw_rx_bd *cons_rx_buf; + struct rte_mbuf *mbuf; + + cons_rx_buf = &rxr->rx_buf_ring[cons]; + RTE_ASSERT(cons_rx_buf->mbuf != NULL); + mbuf = cons_rx_buf->mbuf; + cons_rx_buf->mbuf = NULL; + return mbuf; +} + +static void bnxt_tpa_start(struct bnxt_rx_queue *rxq, + struct rx_tpa_start_cmpl *tpa_start, + struct rx_tpa_start_cmpl_hi *tpa_start1) +{ + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id & + RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT; + uint16_t data_cons; + struct bnxt_tpa_info *tpa_info; + struct rte_mbuf *mbuf; + + data_cons = tpa_start->opaque; + tpa_info = &rxr->tpa_info[agg_id]; + + mbuf = bnxt_consume_rx_buf(rxr, data_cons); + + bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf); + + tpa_info->mbuf = mbuf; + tpa_info->len = rte_le_to_cpu_32(tpa_start->len); + + mbuf->nb_segs = 1; + mbuf->next = NULL; + mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len); + mbuf->data_len = mbuf->pkt_len; + mbuf->port = rxq->port_id; + mbuf->ol_flags = PKT_RX_LRO; + if (likely(tpa_start->flags_type & + rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) { + mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash); + mbuf->ol_flags |= PKT_RX_RSS_HASH; + } else { + mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code); + mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + } + if (tpa_start1->flags2 & + rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) { + mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata); + mbuf->ol_flags |= 
PKT_RX_VLAN_PKT; + } + if (likely(tpa_start1->flags2 & + rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + + /* recycle next mbuf */ + data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons); + bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons)); +} + +static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr, + uint8_t agg_bufs, uint32_t raw_cp_cons) +{ + uint16_t last_cp_cons; + struct rx_pkt_cmpl *agg_cmpl; + + raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs); + last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons); + agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons]; + return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct); +} + +/* TPA consume agg buffer out of order, allocate connected data only */ +static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq) +{ + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod); + + /* TODO batch allocation for better performance */ + while (rte_bitmap_get(rxr->ag_bitmap, next)) { + if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) { + RTE_LOG(ERR, PMD, + "agg mbuf alloc failed: prod=0x%x\n", next); + break; + } + rte_bitmap_clear(rxr->ag_bitmap, next); + rxr->ag_prod = next; + next = RING_NEXT(rxr->ag_ring_struct, next); + } + + return 0; +} + +static int bnxt_rx_pages(struct bnxt_rx_queue *rxq, + struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons, + uint8_t agg_buf) +{ + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + int i; + uint16_t cp_cons, ag_cons; + struct rx_pkt_cmpl *rxcmp; + struct rte_mbuf *last = mbuf; + + for (i = 0; i < agg_buf; i++) { + struct bnxt_sw_rx_bd *ag_buf; + struct rte_mbuf *ag_mbuf; + *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons); + cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons); + rxcmp = (struct rx_pkt_cmpl *) + &cpr->cp_desc_ring[cp_cons]; + +#ifdef BNXT_DEBUG + bnxt_dump_cmpl(cp_cons, rxcmp); +#endif + + ag_cons = rxcmp->opaque; + RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask); + ag_buf = &rxr->ag_buf_ring[ag_cons]; + ag_mbuf = ag_buf->mbuf; + RTE_ASSERT(ag_mbuf != NULL); -static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, + ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len); + + mbuf->nb_segs++; + mbuf->pkt_len += ag_mbuf->data_len; + + last->next = ag_mbuf; + last = ag_mbuf; + + ag_buf->mbuf = NULL; + + /* + * As aggregation buffer consumed out of order in TPA module, + * use bitmap to track freed slots to be allocated and notified + * to NIC + */ + rte_bitmap_set(rxr->ag_bitmap, ag_cons); + } + bnxt_prod_ag_mbuf(rxq); + return 0; +} + +static inline struct rte_mbuf *bnxt_tpa_end( + struct bnxt_rx_queue *rxq, + uint32_t *raw_cp_cons, + struct rx_tpa_end_cmpl *tpa_end, + struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused) +{ + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) + >> RX_TPA_END_CMPL_AGG_ID_SFT; + struct rte_mbuf *mbuf; + uint8_t agg_bufs; + struct bnxt_tpa_info *tpa_info; + + tpa_info = &rxr->tpa_info[agg_id]; + mbuf = tpa_info->mbuf; + RTE_ASSERT(mbuf != NULL); + + rte_prefetch0(mbuf); + agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) & + RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT; + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons)) + return NULL; + bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs); + } + mbuf->l4_len = tpa_end->payload_offset; + + struct 
rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool); + RTE_ASSERT(new_data != NULL); + if (!new_data) { + rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + return NULL; + } + tpa_info->mbuf = new_data; + + return mbuf; +} + +static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, struct bnxt_rx_queue *rxq, uint32_t *raw_cons) { struct bnxt_cp_ring_info *cpr = rxq->cp_ring; @@ -104,9 +342,13 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, uint32_t tmp_raw_cons = *raw_cons; uint16_t cons, prod, cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons); - struct bnxt_sw_rx_bd *rx_buf; +#ifdef BNXT_DEBUG + uint16_t ag_cons; +#endif struct rte_mbuf *mbuf; int rc = 0; + uint8_t agg_buf = 0; + uint16_t cmp_type; rxcmp = (struct rx_pkt_cmpl *) &cpr->cp_desc_ring[cp_cons]; @@ -118,14 +360,39 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct)) return -EBUSY; + cmp_type = CMP_TYPE(rxcmp); + if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) { + bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp, + (struct rx_tpa_start_cmpl_hi *)rxcmp1); + rc = -EINVAL; /* Continue w/o new mbuf */ + goto next_rx; + } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) { + mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons, + (struct rx_tpa_end_cmpl *)rxcmp, + (struct rx_tpa_end_cmpl_hi *)rxcmp1); + if (unlikely(!mbuf)) + return -EBUSY; + *rx_pkt = mbuf; + goto next_rx; + } else if (cmp_type != 0x11) { + rc = -EINVAL; + goto next_rx; + } + + agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) + >> RX_PKT_CMPL_AGG_BUFS_SFT; + if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons)) + return -EBUSY; + prod = rxr->rx_prod; - /* EW - GRO deferred to phase 3 */ cons = rxcmp->opaque; - rx_buf = &rxr->rx_buf_ring[cons]; - mbuf = rx_buf->mbuf; + mbuf = bnxt_consume_rx_buf(rxr, cons); rte_prefetch0(mbuf); + if (mbuf == NULL) + return -ENOMEM; + mbuf->nb_segs = 1; mbuf->next = NULL; mbuf->pkt_len = rxcmp->len; @@ -139,6 +406,10 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, mbuf->hash.fdir.id = rxcmp1->cfa_code; mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; } + + if (agg_buf) + bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf); + if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) { mbuf->vlan_tci = rxcmp1->metadata & (RX_PKT_CMPL_METADATA_VID_MASK | @@ -147,14 +418,17 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, mbuf->ol_flags |= PKT_RX_VLAN_PKT; } - rx_buf->mbuf = NULL; +#ifdef BNXT_DEBUG if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) { /* Re-install the mbuf back to the rx ring */ bnxt_reuse_rx_mbuf(rxr, cons, mbuf); + if (agg_buf) + bnxt_reuse_ag_mbuf(rxr, ag_cons, mbuf); rc = -EIO; goto next_rx; } +#endif /* * TODO: Redesign this.... * If the allocation fails, the packet does not get received. @@ -170,24 +444,20 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, * calls in favour of a tight loop with the same function being called * in it. */ + prod = RING_NEXT(rxr->rx_ring_struct, prod); if (bnxt_alloc_rx_data(rxq, rxr, prod)) { RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod); rc = -ENOMEM; - goto next_rx; } - + rxr->rx_prod = prod; /* * All MBUFs are allocated with the same size under DPDK, * no optimization for rx_copy_thresh */ - /* AGG buf operation is deferred */ - - /* EW - VLAN reception. 
Must compare against the ol_flags */ - *rx_pkt = mbuf; + next_rx: - rxr->rx_prod = RING_NEXT(rxr->rx_ring_struct, prod); *raw_cons = tmp_raw_cons; @@ -203,8 +473,9 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint32_t raw_cons = cpr->cp_raw_cons; uint32_t cons; int nb_rx_pkts = 0; - bool rx_event = false; struct rx_pkt_cmpl *rxcmp; + uint16_t prod = rxr->rx_prod; + uint16_t ag_prod = rxr->ag_prod; /* Handle RX burst request */ while (1) { @@ -222,26 +493,27 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons); if (likely(!rc)) nb_rx_pkts++; - else if (rc == -EBUSY) /* partial completion */ + if (rc == -EBUSY) /* partial completion */ break; - rx_event = true; } raw_cons = NEXT_RAW_CMP(raw_cons); if (nb_rx_pkts == nb_pkts) break; } - if (raw_cons == cpr->cp_raw_cons) { + + cpr->cp_raw_cons = raw_cons; + if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) { /* * For PMD, there is no need to keep on pushing to REARM * the doorbell if there are no new completions */ return nb_rx_pkts; } - cpr->cp_raw_cons = raw_cons; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); - if (rx_event) - B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + /* Ring the AGG ring DB */ + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); return nb_rx_pkts; } @@ -257,6 +529,12 @@ void bnxt_free_rx_rings(struct bnxt *bp) bnxt_free_ring(rxq->rx_ring->rx_ring_struct); rte_free(rxq->rx_ring->rx_ring_struct); + + /* Free the Aggregator ring */ + bnxt_free_ring(rxq->rx_ring->ag_ring_struct); + rte_free(rxq->rx_ring->ag_ring_struct); + rxq->rx_ring->ag_ring_struct = NULL; + rte_free(rxq->rx_ring); bnxt_free_ring(rxq->cp_ring->cp_ring_struct); @@ -270,13 +548,11 @@ void bnxt_free_rx_rings(struct bnxt *bp) int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id) { - struct bnxt *bp = rxq->bp; struct bnxt_cp_ring_info *cpr; struct bnxt_rx_ring_info *rxr; struct bnxt_ring *ring; - rxq->rx_buf_use_size = bp->eth_dev->data->mtu + - ETHER_HDR_LEN + ETHER_CRC_LEN + + rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE); rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf); @@ -313,13 +589,29 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id) if (ring == NULL) return -ENOMEM; cpr->cp_ring_struct = ring; - ring->ring_size = rxr->rx_ring_struct->ring_size * 2; + ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size * + (2 + AGG_RING_SIZE_FACTOR)); ring->ring_mask = ring->ring_size - 1; ring->bd = (void *)cpr->cp_desc_ring; ring->bd_dma = cpr->cp_desc_mapping; ring->vmem_size = 0; ring->vmem = NULL; + /* Allocate Aggregator rings */ + ring = rte_zmalloc_socket("bnxt_rx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + rxr->ag_ring_struct = ring; + ring->ring_size = rte_align32pow2(rxq->nb_rx_desc * + AGG_RING_SIZE_FACTOR); + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)rxr->ag_desc_ring; + ring->bd_dma = rxr->ag_desc_mapping; + ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd); + ring->vmem = (void **)&rxr->ag_buf_ring; + return 0; } @@ -332,8 +624,8 @@ static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type, if (!rx_bd_ring) return; for (j = 0; j < ring->ring_size; j++) { - rx_bd_ring[j].flags_type = type; - rx_bd_ring[j].len = len; + rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type); + rx_bd_ring[j].len = 
rte_cpu_to_le_16(len); rx_bd_ring[j].opaque = j; } } @@ -344,12 +636,17 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) struct bnxt_ring *ring; uint32_t prod, type; unsigned int i; + uint16_t size; + + size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; + if (rxq->rx_buf_use_size <= size) + size = rxq->rx_buf_use_size; - type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD; + type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; rxr = rxq->rx_ring; ring = rxr->rx_ring_struct; - bnxt_init_rxbds(ring, type, rxq->rx_buf_use_size); + bnxt_init_rxbds(ring, type, size); prod = rxr->rx_prod; for (i = 0; i < ring->ring_size; i++) { @@ -362,6 +659,36 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) rxr->rx_prod = prod; prod = RING_NEXT(rxr->rx_ring_struct, prod); } + RTE_LOG(DEBUG, PMD, "%s\n", __func__); + + ring = rxr->ag_ring_struct; + type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG; + bnxt_init_rxbds(ring, type, size); + prod = rxr->ag_prod; + + for (i = 0; i < ring->ring_size; i++) { + if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) { + RTE_LOG(WARNING, PMD, + "init'ed AG ring %d with %d/%d mbufs only\n", + rxq->queue_id, i, ring->ring_size); + break; + } + rxr->ag_prod = prod; + prod = RING_NEXT(rxr->ag_ring_struct, prod); + } + RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__); + + if (rxr->tpa_info) { + for (i = 0; i < BNXT_TPA_MAX; i++) { + rxr->tpa_info[i].mbuf = + __bnxt_alloc_rx_data(rxq->mb_pool); + if (!rxr->tpa_info[i].mbuf) { + rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + return -ENOMEM; + } + } + } + RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__); return 0; } diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h index f766b26c..f8d6dc80 100644 --- a/drivers/net/bnxt/bnxt_rxr.h +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -37,20 +37,66 @@ #define B_RX_DB(db, prod) \ (*(uint32_t *)db = (DB_KEY_RX | prod)) +#define BNXT_TPA_L4_SIZE(x) \ + { \ + typeof(x) hdr_info = (x); \ + (((hdr_info) & 0xf8000000) ? 
((hdr_info) >> 27) : 32) \ + } + +#define BNXT_TPA_INNER_L3_OFF(hdr_info) \ + (((hdr_info) >> 18) & 0x1ff) + +#define BNXT_TPA_INNER_L2_OFF(hdr_info) \ + (((hdr_info) >> 9) & 0x1ff) + +#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ + ((hdr_info) & 0x1ff) + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE, /* Undefined type */ + PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ + PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ + PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ +}; + +struct bnxt_tpa_info { + struct rte_mbuf *mbuf; + uint16_t len; + unsigned short gso_type; + uint32_t flags2; + uint32_t metadata; + enum pkt_hash_types hash_type; + uint32_t rss_hash; + uint32_t hdr_info; +}; + struct bnxt_sw_rx_bd { struct rte_mbuf *mbuf; /* data associated with RX descriptor */ }; struct bnxt_rx_ring_info { uint16_t rx_prod; + uint16_t ag_prod; void *rx_doorbell; + void *ag_doorbell; struct rx_prod_pkt_bd *rx_desc_ring; + struct rx_prod_pkt_bd *ag_desc_ring; struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */ + struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */ phys_addr_t rx_desc_mapping; + phys_addr_t ag_desc_mapping; struct bnxt_ring *rx_ring_struct; + struct bnxt_ring *ag_ring_struct; + + /* + * To deal with out of order return from TPA, use free buffer indicator + */ + struct rte_bitmap *ag_bitmap; + + struct bnxt_tpa_info *tpa_info; }; uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c index 40c9cac1..d7d0e35c 100644 --- a/drivers/net/bnxt/bnxt_stats.c +++ b/drivers/net/bnxt/bnxt_stats.c @@ -43,6 +43,171 @@ #include "bnxt_txq.h" #include "hsi_struct_def_dpdk.h" +static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = { + {"rx_64b_frames", offsetof(struct rx_port_stats, + rx_64b_frames)}, + {"rx_65b_127b_frames", offsetof(struct rx_port_stats, + rx_65b_127b_frames)}, + {"rx_128b_255b_frames", offsetof(struct rx_port_stats, + rx_128b_255b_frames)}, + {"rx_256b_511b_frames", offsetof(struct rx_port_stats, + rx_256b_511b_frames)}, + {"rx_512b_1023b_frames", offsetof(struct rx_port_stats, + rx_512b_1023b_frames)}, + {"rx_1024b_1518_frames", offsetof(struct rx_port_stats, + rx_1024b_1518_frames)}, + {"rx_good_vlan_frames", offsetof(struct rx_port_stats, + rx_good_vlan_frames)}, + {"rx_1519b_2047b_frames", offsetof(struct rx_port_stats, + rx_1519b_2047b_frames)}, + {"rx_2048b_4095b_frames", offsetof(struct rx_port_stats, + rx_2048b_4095b_frames)}, + {"rx_4096b_9216b_frames", offsetof(struct rx_port_stats, + rx_4096b_9216b_frames)}, + {"rx_9217b_16383b_frames", offsetof(struct rx_port_stats, + rx_9217b_16383b_frames)}, + {"rx_total_frames", offsetof(struct rx_port_stats, + rx_total_frames)}, + {"rx_ucast_frames", offsetof(struct rx_port_stats, + rx_ucast_frames)}, + {"rx_mcast_frames", offsetof(struct rx_port_stats, + rx_mcast_frames)}, + {"rx_bcast_frames", offsetof(struct rx_port_stats, + rx_bcast_frames)}, + {"rx_fcs_err_frames", offsetof(struct rx_port_stats, + rx_fcs_err_frames)}, + {"rx_ctrl_frames", offsetof(struct rx_port_stats, + rx_ctrl_frames)}, + {"rx_pause_frames", offsetof(struct rx_port_stats, + rx_pause_frames)}, + {"rx_pfc_frames", offsetof(struct rx_port_stats, + rx_pfc_frames)}, + {"rx_align_err_frames", offsetof(struct rx_port_stats, + rx_align_err_frames)}, + {"rx_ovrsz_frames", offsetof(struct rx_port_stats, + rx_ovrsz_frames)}, + {"rx_jbr_frames", offsetof(struct rx_port_stats, + rx_jbr_frames)}, + {"rx_mtu_err_frames", offsetof(struct rx_port_stats, + 
rx_mtu_err_frames)}, + {"rx_tagged_frames", offsetof(struct rx_port_stats, + rx_tagged_frames)}, + {"rx_double_tagged_frames", offsetof(struct rx_port_stats, + rx_double_tagged_frames)}, + {"rx_good_frames", offsetof(struct rx_port_stats, + rx_good_frames)}, + {"rx_undrsz_frames", offsetof(struct rx_port_stats, + rx_undrsz_frames)}, + {"rx_eee_lpi_events", offsetof(struct rx_port_stats, + rx_eee_lpi_events)}, + {"rx_eee_lpi_duration", offsetof(struct rx_port_stats, + rx_eee_lpi_duration)}, + {"rx_bytes", offsetof(struct rx_port_stats, + rx_bytes)}, + {"rx_runt_bytes", offsetof(struct rx_port_stats, + rx_runt_bytes)}, + {"rx_runt_frames", offsetof(struct rx_port_stats, + rx_runt_frames)}, +}; + +static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = { + {"tx_64b_frames", offsetof(struct tx_port_stats, + tx_64b_frames)}, + {"tx_65b_127b_frames", offsetof(struct tx_port_stats, + tx_65b_127b_frames)}, + {"tx_128b_255b_frames", offsetof(struct tx_port_stats, + tx_128b_255b_frames)}, + {"tx_256b_511b_frames", offsetof(struct tx_port_stats, + tx_256b_511b_frames)}, + {"tx_512b_1023b_frames", offsetof(struct tx_port_stats, + tx_512b_1023b_frames)}, + {"tx_1024b_1518_frames", offsetof(struct tx_port_stats, + tx_1024b_1518_frames)}, + {"tx_good_vlan_frames", offsetof(struct tx_port_stats, + tx_good_vlan_frames)}, + {"tx_1519b_2047_frames", offsetof(struct tx_port_stats, + tx_1519b_2047_frames)}, + {"tx_2048b_4095b_frames", offsetof(struct tx_port_stats, + tx_2048b_4095b_frames)}, + {"tx_4096b_9216b_frames", offsetof(struct tx_port_stats, + tx_4096b_9216b_frames)}, + {"tx_9217b_16383b_frames", offsetof(struct tx_port_stats, + tx_9217b_16383b_frames)}, + {"tx_good_frames", offsetof(struct tx_port_stats, + tx_good_frames)}, + {"tx_total_frames", offsetof(struct tx_port_stats, + tx_total_frames)}, + {"tx_ucast_frames", offsetof(struct tx_port_stats, + tx_ucast_frames)}, + {"tx_mcast_frames", offsetof(struct tx_port_stats, + tx_mcast_frames)}, + {"tx_bcast_frames", offsetof(struct tx_port_stats, + tx_bcast_frames)}, + {"tx_pause_frames", offsetof(struct tx_port_stats, + tx_pause_frames)}, + {"tx_pfc_frames", offsetof(struct tx_port_stats, + tx_pfc_frames)}, + {"tx_jabber_frames", offsetof(struct tx_port_stats, + tx_jabber_frames)}, + {"tx_fcs_err_frames", offsetof(struct tx_port_stats, + tx_fcs_err_frames)}, + {"tx_err", offsetof(struct tx_port_stats, + tx_err)}, + {"tx_fifo_underruns", offsetof(struct tx_port_stats, + tx_fifo_underruns)}, + {"tx_eee_lpi_events", offsetof(struct tx_port_stats, + tx_eee_lpi_events)}, + {"tx_eee_lpi_duration", offsetof(struct tx_port_stats, + tx_eee_lpi_duration)}, + {"tx_total_collisions", offsetof(struct tx_port_stats, + tx_total_collisions)}, + {"tx_bytes", offsetof(struct tx_port_stats, + tx_bytes)}, +}; + +static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = { + {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_ucast_pkts)}, + {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_mcast_pkts)}, + {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_bcast_pkts)}, + {"tx_err_pkts", offsetof(struct hwrm_func_qstats_output, + tx_err_pkts)}, + {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output, + tx_drop_pkts)}, + {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, + tx_ucast_bytes)}, + {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output, + tx_mcast_bytes)}, + {"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_output, + tx_bcast_bytes)}, + {"rx_ucast_pkts", offsetof(struct 
hwrm_func_qstats_output, + rx_ucast_pkts)}, + {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output, + rx_mcast_pkts)}, + {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, + rx_bcast_pkts)}, + {"rx_err_pkts", offsetof(struct hwrm_func_qstats_output, + rx_err_pkts)}, + {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output, + rx_drop_pkts)}, + {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_ucast_bytes)}, + {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_mcast_bytes)}, + {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_bcast_bytes)}, + {"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output, + rx_agg_pkts)}, + {"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output, + rx_agg_bytes)}, + {"rx_agg_events", offsetof(struct hwrm_func_qstats_output, + rx_agg_events)}, + {"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output, + rx_agg_aborts)}, +}; + /* * Statistics functions */ @@ -74,64 +239,18 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev, for (i = 0; i < bp->rx_cp_nr_rings; i++) { struct bnxt_rx_queue *rxq = bp->rx_queues[i]; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - struct ctx_hw_stats64 *hw_stats = - (struct ctx_hw_stats64 *)cpr->hw_stats; - - bnxt_stats->q_ipackets[i] += - rte_le_to_cpu_64(hw_stats->rx_ucast_pkts); - bnxt_stats->q_ipackets[i] += - rte_le_to_cpu_64(hw_stats->rx_mcast_pkts); - bnxt_stats->q_ipackets[i] += - rte_le_to_cpu_64(hw_stats->rx_bcast_pkts); - - bnxt_stats->q_ibytes[i] += - rte_le_to_cpu_64(hw_stats->rx_ucast_bytes); - bnxt_stats->q_ibytes[i] += - rte_le_to_cpu_64(hw_stats->rx_mcast_bytes); - bnxt_stats->q_ibytes[i] += - rte_le_to_cpu_64(hw_stats->rx_bcast_bytes); - - /* - * TBD: No clear mapping to this... we don't seem - * to have a stat specifically for dropped due to - * insufficient mbufs. 
- */ - bnxt_stats->q_errors[i] = 0; - - /* These get replaced once the *_QSTATS commands work */ - bnxt_stats->ipackets += bnxt_stats->q_ipackets[i]; - bnxt_stats->ibytes += bnxt_stats->q_ibytes[i]; - bnxt_stats->imissed += bnxt_stats->q_errors[i]; - bnxt_stats->ierrors += - rte_le_to_cpu_64(hw_stats->rx_discard_pkts); + + bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats); } for (i = 0; i < bp->tx_cp_nr_rings; i++) { struct bnxt_tx_queue *txq = bp->tx_queues[i]; struct bnxt_cp_ring_info *cpr = txq->cp_ring; - struct ctx_hw_stats64 *hw_stats = - (struct ctx_hw_stats64 *)cpr->hw_stats; - - bnxt_stats->q_opackets[i] += - rte_le_to_cpu_64(hw_stats->tx_ucast_pkts); - bnxt_stats->q_opackets[i] += - rte_le_to_cpu_64(hw_stats->tx_mcast_pkts); - bnxt_stats->q_opackets[i] += - rte_le_to_cpu_64(hw_stats->tx_bcast_pkts); - - bnxt_stats->q_obytes[i] += - rte_le_to_cpu_64(hw_stats->tx_ucast_bytes); - bnxt_stats->q_obytes[i] += - rte_le_to_cpu_64(hw_stats->tx_mcast_bytes); - bnxt_stats->q_obytes[i] += - rte_le_to_cpu_64(hw_stats->tx_bcast_bytes); - - /* These get replaced once the *_QSTATS commands work */ - bnxt_stats->opackets += bnxt_stats->q_opackets[i]; - bnxt_stats->obytes += bnxt_stats->q_obytes[i]; - bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts); - bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_discard_pkts); + + bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats); } + bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats); + bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail); } void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) @@ -139,4 +258,103 @@ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; bnxt_clear_all_hwrm_stat_ctxs(bp); + rte_atomic64_clear(&bp->rx_mbuf_alloc_fail); +} + +int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat *xstats, unsigned int n) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + + unsigned int count, i; + uint64_t tx_drop_pkts; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS)) { + RTE_LOG(ERR, PMD, "xstats not supported for VF\n"); + return 0; + } + + bnxt_hwrm_port_qstats(bp); + bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts); + + count = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + 1; /* For tx_drop_pkts */ + + if (n < count) + return count; + + count = 0; + for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) { + uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats; + xstats[count].value = rte_le_to_cpu_64( + *(uint64_t *)((char *)rx_stats + + bnxt_rx_stats_strings[i].offset)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) { + uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats; + xstats[count].value = rte_le_to_cpu_64( + *(uint64_t *)((char *)tx_stats + + bnxt_tx_stats_strings[i].offset)); + count++; + } + + /* The Tx drop pkts aka the Anti spoof counter */ + xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts); + count++; + + return count; +} + +int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit) +{ + /* Account for the Tx drop pkts aka the Anti spoof counter */ + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + 1; + unsigned int i, count; + + if (xstats_names != NULL) { + count = 0; + + for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) { + snprintf(xstats_names[count].name, + 
sizeof(xstats_names[count].name), + "%s", + bnxt_rx_stats_strings[i].name); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + bnxt_tx_stats_strings[i].name); + count++; + } + + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + bnxt_func_stats_strings[4].name); + count++; + } + return stat_cnt; +} + +void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + + if (bp->flags & BNXT_FLAG_PORT_STATS && !BNXT_NPAR_PF(bp)) + bnxt_hwrm_port_clr_stats(bp); + + if (BNXT_VF(bp)) + RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n"); + if (BNXT_NPAR_PF(bp)) + RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n"); + if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + RTE_LOG(ERR, PMD, "Operation not supported\n"); } diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h index 65408a44..b6d133ef 100644 --- a/drivers/net/bnxt/bnxt_stats.h +++ b/drivers/net/bnxt/bnxt_stats.h @@ -40,5 +40,15 @@ void bnxt_free_stats(struct bnxt *bp); void bnxt_stats_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_stats *bnxt_stats); void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev); +int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit); +int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat *xstats, unsigned int n); +void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev); +struct bnxt_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint64_t offset; +}; #endif diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c index 0d15bb1e..6870b16d 100644 --- a/drivers/net/bnxt/bnxt_txr.c +++ b/drivers/net/bnxt/bnxt_txr.c @@ -213,7 +213,8 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, /* TSO */ txbd1->lflags = TX_BD_LONG_LFLAGS_LSO; txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + - tx_pkt->l4_len; + tx_pkt->l4_len + tx_pkt->outer_l2_len + + tx_pkt->outer_l3_len; txbd1->mss = tx_pkt->tso_segsz; } else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM | diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c index 33fdde2f..db9fb079 100644 --- a/drivers/net/bnxt/bnxt_vnic.c +++ b/drivers/net/bnxt/bnxt_vnic.c @@ -69,21 +69,14 @@ void bnxt_init_vnics(struct bnxt *bp) uint16_t max_vnics; int i, j; - if (BNXT_PF(bp)) { - struct bnxt_pf_info *pf = &bp->pf; - - max_vnics = pf->max_vnics; - } else { - struct bnxt_vf_info *vf = &bp->vf; - - max_vnics = vf->max_vnics; - } + max_vnics = bp->max_vnics; STAILQ_INIT(&bp->free_vnic_list); for (i = 0; i < max_vnics; i++) { vnic = &bp->vnic_info[i]; vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE; - vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE; - vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE; + vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; for (j = 0; j < MAX_QUEUES_PER_VNIC; j++) vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE; @@ -177,19 +170,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp) char mz_name[RTE_MEMZONE_NAMESIZE]; uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP( HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) + - HW_HASH_KEY_SIZE); + HW_HASH_KEY_SIZE + + BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN); uint16_t max_vnics; int i; + phys_addr_t mz_phys_addr; - if (BNXT_PF(bp)) { - 
struct bnxt_pf_info *pf = &bp->pf; - - max_vnics = pf->max_vnics; - } else { - struct bnxt_vf_info *vf = &bp->vf; - - max_vnics = vf->max_vnics; - } + max_vnics = bp->max_vnics; snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); @@ -204,6 +191,19 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp) if (!mz) return -ENOMEM; } + mz_phys_addr = mz->phys_addr; + if ((unsigned long)mz->addr == mz_phys_addr) { + RTE_LOG(WARNING, PMD, + "Memzone physical address same as virtual.\n"); + RTE_LOG(WARNING, PMD, + "Using rte_mem_virt2phy()\n"); + mz_phys_addr = rte_mem_virt2phy(mz->addr); + if (mz_phys_addr == 0) { + RTE_LOG(ERR, PMD, + "unable to map vnic address to physical memory\n"); + return -ENOMEM; + } + } for (i = 0; i < max_vnics; i++) { vnic = &bp->vnic_info[i]; @@ -213,12 +213,16 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp) (void *)((char *)mz->addr + (entry_length * i)); memset(vnic->rss_table, -1, entry_length); - vnic->rss_table_dma_addr = mz->phys_addr + (entry_length * i); + vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i); vnic->rss_hash_key = (void *)((char *)vnic->rss_table + HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table)); vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table); + vnic->mc_list = (void *)((char *)vnic->rss_hash_key + + HW_HASH_KEY_SIZE); + vnic->mc_list_dma_addr = vnic->rss_hash_key_dma_addr + + HW_HASH_KEY_SIZE; } return 0; @@ -232,15 +236,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp) if (bp->vnic_info == NULL) return; - if (BNXT_PF(bp)) { - struct bnxt_pf_info *pf = &bp->pf; - - max_vnics = pf->max_vnics; - } else { - struct bnxt_vf_info *vf = &bp->vf; - - max_vnics = vf->max_vnics; - } + max_vnics = bp->max_vnics; for (i = 0; i < max_vnics; i++) { vnic = &bp->vnic_info[i]; if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) { @@ -258,15 +254,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp) struct bnxt_vnic_info *vnic_mem; uint16_t max_vnics; - if (BNXT_PF(bp)) { - struct bnxt_pf_info *pf = &bp->pf; - - max_vnics = pf->max_vnics; - } else { - struct bnxt_vf_info *vf = &bp->vf; - - max_vnics = vf->max_vnics; - } + max_vnics = bp->max_vnics; /* Allocate memory for VNIC pool and filter pool */ vnic_mem = rte_zmalloc("bnxt_vnic_info", max_vnics * sizeof(struct bnxt_vnic_info), 0); diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h index 9671ba42..993f2212 100644 --- a/drivers/net/bnxt/bnxt_vnic.h +++ b/drivers/net/bnxt/bnxt_vnic.h @@ -42,8 +42,7 @@ struct bnxt_vnic_info { uint8_t ff_pool_idx; uint16_t fw_vnic_id; /* returned by Chimp during alloc */ - uint16_t fw_rss_cos_lb_ctx; - uint16_t ctx_is_rss_cos_lb; + uint16_t rss_rule; #define MAX_NUM_TRAFFIC_CLASSES 8 #define MAX_NUM_RSS_QUEUES_PER_VNIC 16 #define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \ @@ -51,17 +50,34 @@ struct bnxt_vnic_info { uint16_t start_grp_id; uint16_t end_grp_id; uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC]; + uint16_t dflt_ring_grp; + uint16_t mru; uint16_t hash_type; phys_addr_t rss_table_dma_addr; uint16_t *rss_table; phys_addr_t rss_hash_key_dma_addr; void *rss_hash_key; + phys_addr_t mc_list_dma_addr; + char *mc_list; + uint32_t mc_addr_cnt; +#define BNXT_MAX_MC_ADDRS 16 uint32_t flags; #define BNXT_VNIC_INFO_PROMISC (1 << 0) #define BNXT_VNIC_INFO_ALLMULTI (1 << 1) +#define BNXT_VNIC_INFO_BCAST (1 << 2) +#define BNXT_VNIC_INFO_UCAST (1 << 3) +#define BNXT_VNIC_INFO_MCAST (1 << 4) +#define 
BNXT_VNIC_INFO_TAGGED	(1 << 5)
+#define BNXT_VNIC_INFO_UNTAGGED	(1 << 6)
+	uint16_t cos_rule;
+	uint16_t lb_rule;
 	bool vlan_strip;
 	bool func_default;
+	bool bd_stall;
+	bool roce_dual;
+	bool roce_only;
+	bool rss_dflt_cr;
 	STAILQ_HEAD(, bnxt_filter_info)	filter;
 };
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index f0248377..cb8660af 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) Broadcom Limited.
+ *   Copyright(c) 2001-2017 Broadcom Limited.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -31,74 +31,89 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

-#ifndef _HSI_STRUCT_DEF_EXTERNAL_H_
-#define _HSI_STRUCT_DEF_EXTERNAL_H_
-
-/*
- * per-context HW statistics -- chip view
- */
-
-struct ctx_hw_stats64 {
-	uint64_t rx_ucast_pkts;
-	uint64_t rx_mcast_pkts;
-	uint64_t rx_bcast_pkts;
-	uint64_t rx_drop_pkts;
-	uint64_t rx_discard_pkts;
-	uint64_t rx_ucast_bytes;
-	uint64_t rx_mcast_bytes;
-	uint64_t rx_bcast_bytes;
-
-	uint64_t tx_ucast_pkts;
-	uint64_t tx_mcast_pkts;
-	uint64_t tx_bcast_pkts;
-	uint64_t tx_drop_pkts;
-	uint64_t tx_discard_pkts;
-	uint64_t tx_ucast_bytes;
-	uint64_t tx_mcast_bytes;
-	uint64_t tx_bcast_bytes;
-
-	uint64_t tpa_pkts;
-	uint64_t tpa_bytes;
-	uint64_t tpa_events;
-	uint64_t tpa_aborts;
-} __attribute__((packed));
-
-/* HW Resource Manager Specification 1.5.1 */
+#ifndef _HSI_STRUCT_DEF_DPDK_
+#define _HSI_STRUCT_DEF_DPDK_
+/* HSI and HWRM Specification 1.7.7 */
 #define HWRM_VERSION_MAJOR	1
-#define HWRM_VERSION_MINOR	5
-#define HWRM_VERSION_UPDATE	1
-
-#define HWRM_VERSION_STR	"1.5.1"
+#define HWRM_VERSION_MINOR	7
+#define HWRM_VERSION_UPDATE	7
+#define HWRM_VERSION_STR	"1.7.7"
 /*
  * Following is the signature for HWRM message field that indicates not
- * applicable	(All F's). Need to cast it the size of the field if needed.
+ * applicable	(All F's). Need to cast it to the size of the field if needed.
  */
-#define HWRM_NA_SIGNATURE	((uint32_t)(-1))
+#define HWRM_NA_SIGNATURE	((uint32_t)(-1))
 #define HWRM_MAX_REQ_LEN	(128)	/* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN	(176)	/* hwrm_func_qstats */
-#define HW_HASH_INDEX_SIZE	0x80	/* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE	40
+#define HWRM_MAX_RESP_LEN	(248)	/* hwrm_selftest_qlist */
+#define HW_HASH_INDEX_SIZE	0x80	/* 7 bit indirection table index. 
*/ +#define HW_HASH_KEY_SIZE 40 #define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ +#define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1 +#define HWRM_ROCE_SP_HSI_VERSION_MINOR 7 +#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 4 /* * Request types */ #define HWRM_VER_GET (UINT32_C(0x0)) +#define HWRM_FUNC_BUF_UNRGTR (UINT32_C(0xe)) +#define HWRM_FUNC_VF_CFG (UINT32_C(0xf)) + /* Reserved for future use */ +#define RESERVED1 (UINT32_C(0x10)) #define HWRM_FUNC_RESET (UINT32_C(0x11)) +#define HWRM_FUNC_GETFID (UINT32_C(0x12)) +#define HWRM_FUNC_VF_ALLOC (UINT32_C(0x13)) +#define HWRM_FUNC_VF_FREE (UINT32_C(0x14)) #define HWRM_FUNC_QCAPS (UINT32_C(0x15)) #define HWRM_FUNC_QCFG (UINT32_C(0x16)) +#define HWRM_FUNC_CFG (UINT32_C(0x17)) +#define HWRM_FUNC_QSTATS (UINT32_C(0x18)) +#define HWRM_FUNC_CLR_STATS (UINT32_C(0x19)) #define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a)) +#define HWRM_FUNC_VF_RESC_FREE (UINT32_C(0x1b)) +#define HWRM_FUNC_VF_VNIC_IDS_QUERY (UINT32_C(0x1c)) #define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d)) +#define HWRM_FUNC_DRV_QVER (UINT32_C(0x1e)) +#define HWRM_FUNC_BUF_RGTR (UINT32_C(0x1f)) #define HWRM_PORT_PHY_CFG (UINT32_C(0x20)) +#define HWRM_PORT_MAC_CFG (UINT32_C(0x21)) +#define HWRM_PORT_QSTATS (UINT32_C(0x23)) +#define HWRM_PORT_LPBK_QSTATS (UINT32_C(0x24)) +#define HWRM_PORT_CLR_STATS (UINT32_C(0x25)) #define HWRM_PORT_PHY_QCFG (UINT32_C(0x27)) +#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28)) +#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a)) +#define HWRM_PORT_LED_CFG (UINT32_C(0x2d)) +#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e)) +#define HWRM_PORT_LED_QCAPS (UINT32_C(0x2f)) #define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30)) +#define HWRM_QUEUE_QCFG (UINT32_C(0x31)) +#define HWRM_QUEUE_CFG (UINT32_C(0x32)) +#define HWRM_FUNC_VLAN_CFG (UINT32_C(0x33)) +#define HWRM_FUNC_VLAN_QCFG (UINT32_C(0x34)) +#define HWRM_QUEUE_PFCENABLE_QCFG (UINT32_C(0x35)) +#define HWRM_QUEUE_PFCENABLE_CFG (UINT32_C(0x36)) +#define HWRM_QUEUE_PRI2COS_QCFG (UINT32_C(0x37)) +#define HWRM_QUEUE_PRI2COS_CFG (UINT32_C(0x38)) +#define HWRM_QUEUE_COS2BW_QCFG (UINT32_C(0x39)) +#define HWRM_QUEUE_COS2BW_CFG (UINT32_C(0x3a)) +#define HWRM_VNIC_ALLOC (UINT32_C(0x40)) #define HWRM_VNIC_ALLOC (UINT32_C(0x40)) #define HWRM_VNIC_FREE (UINT32_C(0x41)) #define HWRM_VNIC_CFG (UINT32_C(0x42)) +#define HWRM_VNIC_QCFG (UINT32_C(0x43)) +#define HWRM_VNIC_TPA_CFG (UINT32_C(0x44)) #define HWRM_VNIC_RSS_CFG (UINT32_C(0x46)) +#define HWRM_VNIC_RSS_QCFG (UINT32_C(0x47)) +#define HWRM_VNIC_PLCMODES_CFG (UINT32_C(0x48)) +#define HWRM_VNIC_PLCMODES_QCFG (UINT32_C(0x49)) +#define HWRM_VNIC_QCAPS (UINT32_C(0x4a)) #define HWRM_RING_ALLOC (UINT32_C(0x50)) #define HWRM_RING_FREE (UINT32_C(0x51)) +#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (UINT32_C(0x52)) +#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (UINT32_C(0x53)) +#define HWRM_RING_RESET (UINT32_C(0x5e)) #define HWRM_RING_GRP_ALLOC (UINT32_C(0x60)) #define HWRM_RING_GRP_FREE (UINT32_C(0x61)) #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70)) @@ -107,16 +122,71 @@ struct ctx_hw_stats64 { #define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91)) #define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92)) #define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93)) + /* Reserved for future use */ +#define HWRM_CFA_VLAN_ANTISPOOF_CFG (UINT32_C(0x94)) +#define HWRM_CFA_TUNNEL_FILTER_ALLOC (UINT32_C(0x95)) +#define HWRM_CFA_TUNNEL_FILTER_FREE (UINT32_C(0x96)) +#define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99)) +#define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a)) +#define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b)) +#define 
HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0)) +#define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1)) +#define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2)) #define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0)) #define HWRM_STAT_CTX_FREE (UINT32_C(0xb1)) +#define HWRM_STAT_CTX_QUERY (UINT32_C(0xb2)) #define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3)) +#define HWRM_FW_RESET (UINT32_C(0xc0)) +#define HWRM_FW_QSTATUS (UINT32_C(0xc1)) #define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0)) +#define HWRM_REJECT_FWD_RESP (UINT32_C(0xd1)) +#define HWRM_FWD_RESP (UINT32_C(0xd2)) +#define HWRM_FWD_ASYNC_EVENT_CMPL (UINT32_C(0xd3)) +#define HWRM_TEMP_MONITOR_QUERY (UINT32_C(0xe0)) +#define HWRM_WOL_FILTER_ALLOC (UINT32_C(0xf0)) +#define HWRM_WOL_FILTER_FREE (UINT32_C(0xf1)) +#define HWRM_WOL_FILTER_QCFG (UINT32_C(0xf2)) +#define HWRM_WOL_REASON_QCFG (UINT32_C(0xf3)) +#define HWRM_DBG_DUMP (UINT32_C(0xff14)) +#define HWRM_NVM_VALIDATE_OPTION (UINT32_C(0xffef)) +#define HWRM_NVM_FLUSH (UINT32_C(0xfff0)) +#define HWRM_NVM_GET_VARIABLE (UINT32_C(0xfff1)) +#define HWRM_NVM_SET_VARIABLE (UINT32_C(0xfff2)) +#define HWRM_NVM_INSTALL_UPDATE (UINT32_C(0xfff3)) +#define HWRM_NVM_MODIFY (UINT32_C(0xfff4)) +#define HWRM_NVM_VERIFY_UPDATE (UINT32_C(0xfff5)) +#define HWRM_NVM_GET_DEV_INFO (UINT32_C(0xfff6)) +#define HWRM_NVM_ERASE_DIR_ENTRY (UINT32_C(0xfff7)) +#define HWRM_NVM_MOD_DIR_ENTRY (UINT32_C(0xfff8)) +#define HWRM_NVM_FIND_DIR_ENTRY (UINT32_C(0xfff9)) +#define HWRM_NVM_GET_DIR_ENTRIES (UINT32_C(0xfffa)) +#define HWRM_NVM_GET_DIR_INFO (UINT32_C(0xfffb)) +#define HWRM_NVM_RAW_DUMP (UINT32_C(0xfffc)) +#define HWRM_NVM_READ (UINT32_C(0xfffd)) +#define HWRM_NVM_WRITE (UINT32_C(0xfffe)) +#define HWRM_NVM_RAW_WRITE_BLK (UINT32_C(0xffff)) -/* Return Codes */ -#define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2)) -#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3)) - -/* Short TX BD (16 bytes) */ +/* + * Note: The Host Software Interface (HSI) and Hardware Resource Manager (HWRM) + * specification describes the data structures used in Ethernet packet or RDMA + * message data transfers as well as an abstract interface for managing Ethernet + * NIC hardware resources. + */ +/* Ethernet Data path Host Structures */ +/* + * Description: The following three sections document the host structures used + * between device and software drivers for communicating Ethernet packets. + */ +/* BD Ring Structures */ +/* + * Description: This structure is used to inform the NIC of a location for and + * an aggregation buffer that will be used for packet data that is received. An + * aggregation buffer creates a different kind of completion operation for a + * packet where a variable number of BDs may be used to place the packet in the + * host. RX Rings that have aggregation buffers are known as aggregation rings + * and must contain only aggregation buffers. + */ +/* Short TX BD (16 bytes) */ struct tx_bd_short { uint16_t flags_type; /* @@ -149,10 +219,10 @@ struct tx_bd_short { /* * This value indicates how many 16B BD locations are consumed * in the ring by this packet. A value of 1 indicates that this - * BD is the only BD (and that the it is a short BD). A value of + * BD is the only BD (and that the it is a short BD). A value of * 3 indicates either 3 short BDs or 1 long BD and one short BD * in the packet. A value of 0 indicates that there are 32 BD - * locations in the packet (the maximum). This field is valid + * locations in the packet (the maximum). This field is valid * only on the first BD of a packet. 
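 *
 * As a minimal, hedged sketch of producing this field (the txbd
 * pointer and the literal shift of 8, read off the 0x1f00 mask
 * below, are assumptions, not part of this header), a two-BD
 * packet could be encoded as:
 *
 *   uint16_t ft = TX_BD_SHORT_TYPE_TX_BD_SHORT |
 *                 ((2 << 8) & TX_BD_SHORT_FLAGS_BD_CNT_MASK);
 *   txbd->flags_type = rte_cpu_to_le_16(ft);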
*/ #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) @@ -173,7 +243,8 @@ struct tx_bd_short { #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) /* indicates packet length >= 2KB */ #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) - #define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K + #define TX_BD_SHORT_FLAGS_LHINT_LAST \ + TX_BD_SHORT_FLAGS_LHINT_GTE2K /* * If set to 1, the device immediately updates the Send Consumer * Index after the buffer associated with this descriptor has @@ -213,7 +284,7 @@ struct tx_bd_short { */ } __attribute__((packed)); -/* Long TX BD (32 bytes split to 2 16-byte struct) */ +/* Long TX BD (32 bytes split to 2 16-byte struct) */ struct tx_bd_long { uint16_t flags_type; /* @@ -246,10 +317,10 @@ struct tx_bd_long { /* * This value indicates how many 16B BD locations are consumed * in the ring by this packet. A value of 1 indicates that this - * BD is the only BD (and that the it is a short BD). A value of + * BD is the only BD (and that the it is a short BD). A value of * 3 indicates either 3 short BDs or 1 long BD and one short BD * in the packet. A value of 0 indicates that there are 32 BD - * locations in the packet (the maximum). This field is valid + * locations in the packet (the maximum). This field is valid * only on the first BD of a packet. */ #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) @@ -270,7 +341,8 @@ struct tx_bd_long { #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) /* indicates packet length >= 2KB */ #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) - #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K + #define TX_BD_LONG_FLAGS_LHINT_LAST \ + TX_BD_LONG_FLAGS_LHINT_GTE2K /* * If set to 1, the device immediately updates the Send Consumer * Index after the buffer associated with this descriptor has @@ -360,7 +432,7 @@ struct tx_bd_long_hi { * bit is set, outer UDP checksum will be calculated for the * following cases: 1. Packets with tcp_udp_chksum flag set to * offload checksum for inner packet AND the inner packet is - * TCP/UDP. If the inner packet is ICMP for example (non- + * TCP/UDP. If the inner packet is ICMP for example (non- * TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP * checksum will not be calculated. 2. Packets with lso flag set * which implies inner TCP checksum calculation as part of LSO @@ -392,7 +464,7 @@ struct tx_bd_long_hi { * to one when LSO is '1', then the IPID of the tunnel IP header * will be incremented for each subsequent segment of an LSO * operation. The flag is ignored if the LSO packet is a normal - * (non-tunneled) TCP packet. + * (non-tunneled) TCP packet. */ #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80) /* @@ -460,7 +532,7 @@ struct tx_bd_long_hi { #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16) /* Value programmed in CFA VLANTPID register. */ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16) - #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \ + #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \ TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG /* When key=1, This is the VLAN tag TPID select value. */ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000) @@ -474,15 +546,16 @@ struct tx_bd_long_hi { /* No editing */ #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28) /* - * - meta[17:16] - TPID select value (0 = + * - meta[17:16] - TPID select value (0 = * 0x8100). - meta[15:12] - PRI/DE value. - * meta[11:0] - VID value. 
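 *
 * A hedged packing sketch under the layout above; txbd1, vid, de
 * and pri are hypothetical driver-side variables, with the bit
 * positions taken from the meta[17:0] description:
 *
 *   uint32_t meta = (vid & 0xfff) | (de << 12) | (pri << 13);
 *   txbd1->cfa_meta = rte_cpu_to_le_32(meta |
 *                     TX_BD_LONG_CFA_META_KEY_VLAN_TAG);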
*/ #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28) - #define TX_BD_LONG_CFA_META_KEY_LAST TX_BD_LONG_CFA_META_KEY_VLAN_TAG + #define TX_BD_LONG_CFA_META_KEY_LAST \ + TX_BD_LONG_CFA_META_KEY_VLAN_TAG } __attribute__((packed)); -/* RX Producer Packet BD (16 bytes) */ +/* RX Producer Packet BD (16 bytes) */ struct rx_prod_pkt_bd { uint16_t flags_type; /* This value identifies the type of buffer descriptor. */ @@ -490,7 +563,7 @@ struct rx_prod_pkt_bd { #define RX_PROD_PKT_BD_TYPE_SFT 0 /* * Indicates that this BD is 16B long and is an - * RX Producer (ie. empty) buffer descriptor. + * RX Producer (ie. empty) buffer descriptor. */ #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4) /* @@ -558,7 +631,7 @@ struct rx_prod_pkt_bd { /* Completion Ring Structures */ /* Note: This structure is used by the HWRM to communicate HWRM Error. */ -/* Base Completion Record (16 bytes) */ +/* Base Completion Record (16 bytes) */ struct cmpl_base { uint16_t type; /* unused is 10 b */ @@ -637,7 +710,7 @@ struct cmpl_base { /* info4 is 32 b */ } __attribute__((packed)); -/* TX Completion Record (16 bytes) */ +/* TX Completion Record (16 bytes) */ struct tx_cmpl { uint16_t flags_type; /* @@ -689,7 +762,7 @@ struct tx_cmpl { #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1) /* Bad Format: BDs were not formatted correctly. */ #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1) - #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \ + #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \ TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT /* * When this bit is '1', it indicates that the length of the @@ -726,7 +799,7 @@ struct tx_cmpl { /* unused3 is 32 b */ } __attribute__((packed)); -/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */ +/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */ struct rx_pkt_cmpl { uint16_t flags_type; /* @@ -741,7 +814,9 @@ struct rx_pkt_cmpl { * RX L2 completion: Completion of and L2 RX * packet. Length = 32B */ - #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11) + #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11) + #define RX_PKT_CMPL_TYPE_RX_L2_TPA_START UINT32_C(0x13) + #define RX_PKT_CMPL_TYPE_RX_L2_TPA_END UINT32_C(0x15) /* * When this bit is '1', it indicates a packet that has an error * of some type. Type of error is indicated in error_flags. @@ -761,10 +836,12 @@ struct rx_pkt_cmpl { * field. */ #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) - #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST RX_PKT_CMPL_FLAGS_PLACEMENT_HDS + #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \ + RX_PKT_CMPL_FLAGS_PLACEMENT_HDS /* This bit is '1' if the RSS field in this completion is valid. */ #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) /* unused is 1 b */ + #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800) /* * This value indicates what the inner packet determined for the * packet was. @@ -820,7 +897,8 @@ struct rx_pkt_cmpl { * that a timestamp was taken for the packet. */ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12) - #define RX_PKT_CMPL_FLAGS_ITYPE_LAST RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP + #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \ + RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0) #define RX_PKT_CMPL_FLAGS_SFT 6 uint16_t len; @@ -938,7 +1016,7 @@ struct rx_pkt_cmpl_hi { * the vlan TPID value. 
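 *
 * A hedged decode sketch following the bit layout above
 * (metadata[11:0] is the VID, metadata[31:16] the TPID); rxcmp1
 * is a hypothetical pointer to the struct rx_pkt_cmpl_hi that
 * carries this field:
 *
 *   uint32_t md   = rte_le_to_cpu_32(rxcmp1->metadata);
 *   uint16_t vid  = md & 0xfff;
 *   uint16_t tpid = md >> 16;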
*/ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4) - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \ + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \ RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN /* * This field indicates the IP type for the inner-most IP @@ -988,15 +1066,18 @@ struct rx_pkt_cmpl_hi { * means that the packet could not be placed * into 7 physical buffers or less. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (UINT32_C(0x1) << 1) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \ + (UINT32_C(0x1) << 1) /* * Not On Chip: All BDs needed for the packet * were not on-chip when the packet arrived. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (UINT32_C(0x2) << 1) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) /* Bad Format: BDs were not formatted correctly. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (UINT32_C(0x3) << 1) - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ + (UINT32_C(0x3) << 1) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT /* This indicates that there was an error in the IP header checksum. */ #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10) @@ -1037,39 +1118,45 @@ struct rx_pkt_cmpl_hi { * match expectation from L2 Ethertype for IPv4 * and IPv6 in the tunnel header. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (UINT32_C(0x1) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \ + (UINT32_C(0x1) << 9) /* * Indicates that header length is out of range * in the tunnel header. Valid for IPv4. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (UINT32_C(0x2) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 9) /* * Indicates that the physical packet is shorter * than that claimed by the PPPoE header length * for a tunnel PPPoE packet. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (UINT32_C(0x3) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \ + (UINT32_C(0x3) << 9) /* * Indicates that physical packet is shorter * than that claimed by the tunnel l3 header * length. Valid for IPv4, or IPv6 tunnel packet * packets. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (UINT32_C(0x4) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 9) /* * Indicates that the physical packet is shorter * than that claimed by the tunnel UDP header * length for a tunnel UDP packet that is not * fragmented. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (UINT32_C(0x5) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 9) /* * indicates that the IPv4 TTL or IPv6 hop limit - * check have failed (e.g. TTL = 0) in the + * check have failed (e.g. TTL = 0) in the * tunnel header. Valid for IPv4, and IPv6. */ - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (UINT32_C(0x6) << 9) - #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \ + (UINT32_C(0x6) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL /* * This indicates that there was an error in the inner portion @@ -1089,15 +1176,17 @@ struct rx_pkt_cmpl_hi { * and IPv6 or that option other than VFT was * parsed on FCoE packet. 
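 *
 * A hedged sketch of testing this code; rxcmp1 is a hypothetical
 * pointer to struct rx_pkt_cmpl_hi, 0xf000 is assumed to be the
 * mask of this four-bit pkt_error sub-field, and bad_l3_version
 * is a hypothetical counter:
 *
 *   uint16_t err = rte_le_to_cpu_16(rxcmp1->errors_v2);
 *   if ((err & 0xf000) ==
 *       RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION)
 *           bad_l3_version++;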
*/ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (UINT32_C(0x1) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \ + (UINT32_C(0x1) << 12) /* * indicates that header length is out of range. * Valid for IPv4 and RoCE */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (UINT32_C(0x2) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 12) /* * indicates that the IPv4 TTL or IPv6 hop limit - * check have failed (e.g. TTL = 0). Valid for + * check have failed (e.g. TTL = 0). Valid for * IPv4, and IPv6 */ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12) @@ -1106,18 +1195,21 @@ struct rx_pkt_cmpl_hi { * than that claimed by the l3 header length. * Valid for IPv4, IPv6 packet or RoCE packets. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (UINT32_C(0x4) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 12) /* * Indicates that the physical packet is shorter * than that claimed by the UDP header length * for a UDP packet that is not fragmented. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (UINT32_C(0x5) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 12) /* * Indicates that TCP header length > IP * payload. Valid for TCP packets only. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (UINT32_C(0x6) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \ + (UINT32_C(0x6) << 12) /* Indicates that TCP header length < 5. Valid for TCP. */ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \ (UINT32_C(0x7) << 12) @@ -1126,9 +1218,9 @@ struct rx_pkt_cmpl_hi { * TCP header size that does not match data * offset in TCP header. Valid for TCP. */ - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \ (UINT32_C(0x8) << 12) - #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe) #define RX_PKT_CMPL_ERRORS_SFT 1 @@ -1149,7 +1241,474 @@ struct rx_pkt_cmpl_hi { #define RX_PKT_CMPL_REORDER_SFT 0 } __attribute__((packed)); -/* HWRM Forwarded Request (16 bytes) */ +/* RX L2 TPA Start Completion Record (32 bytes split to 2 16-byte struct) */ +struct rx_tpa_start_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. By + * convention, the LSB identifies the length of the record in + * 16B units. Even values indicate 16B records. Odd values + * indicate 32B records. + */ + #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_START_CMPL_TYPE_SFT 0 + /* + * RX L2 TPA Start Completion: Completion at the + * beginning of a TPA operation. Length = 32B + */ + #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13) + /* This bit will always be '0' for TPA start completions. */ + #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Jumbo: TPA Packet was placed using jumbo + * algorithm. This means that the first buffer + * will be filled with data before moving to + * aggregation buffers. Each aggregation buffer + * will be filled before moving to the next + * aggregation buffer. 
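+	 *
+	 * A short, hedged sketch of recovering the placement mode;
+	 * tpa_start is a hypothetical pointer to this structure:
+	 *
+	 *   uint16_t ft = rte_le_to_cpu_16(tpa_start->flags_type);
+	 *   uint16_t pl = (ft & RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK)
+	 *                 >> RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT;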
+ */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: Packet was placed + * using Header/Data separation algorithm. The + * separation location is indicated by the itype + * field. + */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) + /* + * GRO/Jumbo: Packet will be placed using + * GRO/Jumbo where the first packet is filled + * with data. Subsequent packets will be placed + * such that any one packet does not span two + * aggregation buffers unless it starts at the + * beginning of an aggregation buffer. + */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) + /* + * GRO/Header-Data Separation: Packet will be + * placed using GRO/HDS where the header is in + * the first packet. Payload of each packet will + * be placed such that any one packet does not + * span two aggregation buffers unless it starts + * at the beginning of an aggregation buffer. + */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7) + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) + /* unused is 1 b */ + #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800) + /* + * This value indicates what the inner packet determined for the + * packet was. + */ + #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12 + /* TCP Packet: Indicates that the packet was IP and TCP. */ + #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12) + #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \ + RX_TPA_START_CMPL_FLAGS_ITYPE_TCP + #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_START_CMPL_FLAGS_SFT 6 + uint16_t len; + /* + * This value indicates the amount of packet data written to the + * buffer the opaque field in this completion corresponds to. + */ + uint32_t opaque; + /* + * This is a copy of the opaque field from the RX BD this + * completion corresponds to. + */ + uint8_t v1; + /* unused1 is 7 b */ + /* + * This value is written by the NIC such that it will be + * different for each pass through the completion queue. The + * even passes will write 1. The odd passes will write 0. + */ + #define RX_TPA_START_CMPL_V1 UINT32_C(0x1) + /* unused1 is 7 b */ + uint8_t rss_hash_type; + /* + * This is the RSS hash type for the packet. The value is packed + * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]} + * . The value of tuple_extrac_op provides the information about + * what fields the hash was computed on. * 0: The RSS hash was + * computed over source IP address, destination IP address, + * source port, and destination port of inner IP and TCP or UDP + * headers. Note: For non-tunneled packets, the packet headers + * are considered inner packet headers for the RSS hash + * computation purpose. * 1: The RSS hash was computed over + * source IP address and destination IP address of inner IP + * header. Note: For non-tunneled packets, the packet headers + * are considered inner packet headers for the RSS hash + * computation purpose. * 2: The RSS hash was computed over + * source IP address, destination IP address, source port, and + * destination port of IP and TCP or UDP headers of outer tunnel + * headers. Note: For non-tunneled packets, this value is not + * applicable. 
* 3: The RSS hash was computed over source IP
+	 * address and destination IP address of IP header of outer
+	 * tunnel headers. Note: For non-tunneled packets, this value is
+	 * not applicable. Note that the 4-tuple values listed above are
+	 * applicable for layer 4 protocols supported and enabled for
+	 * RSS in the hardware, HWRM firmware, and drivers. For example,
+	 * if RSS hash is supported and enabled for TCP traffic only,
+	 * then the values of tuple_extract_op corresponding to 4-tuples
+	 * are only valid for TCP traffic.
+	 */
+	uint16_t agg_id;
+	/*
+	 * This is the aggregation ID that the completion is associated
+	 * with. Use this number to correlate the TPA start completion
+	 * with the TPA end completion.
+	 */
+	/* unused2 is 9 b */
+	/*
+	 * This is the aggregation ID that the completion is associated
+	 * with. Use this number to correlate the TPA start completion
+	 * with the TPA end completion.
+	 */
+	#define RX_TPA_START_CMPL_AGG_ID_MASK	UINT32_C(0xfe00)
+	#define RX_TPA_START_CMPL_AGG_ID_SFT	9
+	uint32_t rss_hash;
+	/*
+	 * This value is the RSS hash value calculated for the packet
+	 * based on the mode bits and key value in the VNIC.
+	 */
+} __attribute__((packed));
+
+/* last 16 bytes of RX L2 TPA Start Completion Record */
+struct rx_tpa_start_cmpl_hi {
+	uint32_t flags2;
+	/*
+	 * This indicates that the ip checksum was calculated for the
+	 * inner packet and that the sum passed for all segments
+	 * included in the aggregation.
+	 */
+	#define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC	UINT32_C(0x1)
+	/*
+	 * This indicates that the TCP, UDP or ICMP checksum was
+	 * calculated for the inner packet and that the sum passed for
+	 * all segments included in the aggregation.
+	 */
+	#define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC	UINT32_C(0x2)
+	/*
+	 * This indicates that the ip checksum was calculated for the
+	 * tunnel header and that the sum passed for all segments
+	 * included in the aggregation.
+	 */
+	#define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC	UINT32_C(0x4)
+	/*
+	 * This indicates that the UDP checksum was calculated for the
+	 * tunnel packet and that the sum passed for all segments
+	 * included in the aggregation.
+	 */
+	#define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC	UINT32_C(0x8)
+	/* This value indicates what format the metadata field is. */
+	#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK	UINT32_C(0xf0)
+	#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT	4
+	/* No metadata information. Value is zero. */
+	#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE	(UINT32_C(0x0) << 4)
+	/*
+	 * The metadata field contains the VLAN tag and
+	 * TPID value. - metadata[11:0] contains the
+	 * vlan VID value. - metadata[12] contains the
+	 * vlan DE value. - metadata[15:13] contains the
+	 * vlan PRI value. - metadata[31:16] contains
+	 * the vlan TPID value.
+	 */
+	#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN	(UINT32_C(0x1) << 4)
+	#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \
+		RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN
+	/*
+	 * This field indicates the IP type for the inner-most IP
+	 * header. A value of '0' indicates IPv4. A value of '1'
+	 * indicates IPv6.
+	 */
+	#define RX_TPA_START_CMPL_FLAGS2_IP_TYPE	UINT32_C(0x100)
+	uint32_t metadata;
+	/*
+	 * This is data from the CFA block as indicated by the
+	 * meta_format field.
+	 */
+	/* When meta_format=1, this value is the VLAN VID. */
+	#define RX_TPA_START_CMPL_METADATA_VID_MASK	UINT32_C(0xfff)
+	#define RX_TPA_START_CMPL_METADATA_VID_SFT	0
+	/* When meta_format=1, this value is the VLAN DE. 
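+	 *
+	 * A hedged decode sketch; tpa_start1 is a hypothetical pointer
+	 * to this struct rx_tpa_start_cmpl_hi:
+	 *
+	 *   uint32_t md  = rte_le_to_cpu_32(tpa_start1->metadata);
+	 *   uint16_t vid = md & RX_TPA_START_CMPL_METADATA_VID_MASK;
+	 *   int      de  = !!(md & RX_TPA_START_CMPL_METADATA_DE);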
*/ + #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000) + /* When meta_format=1, this value is the VLAN PRI. */ + #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) + #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13 + /* When meta_format=1, this value is the VLAN TPID. */ + #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) + #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16 + uint16_t v2; + /* unused4 is 15 b */ + /* + * This value is written by the NIC such that it will be + * different for each pass through the completion queue. The + * even passes will write 1. The odd passes will write 0. + */ + #define RX_TPA_START_CMPL_V2 UINT32_C(0x1) + /* unused4 is 15 b */ + uint16_t cfa_code; + /* + * This field identifies the CFA action rule that was used for + * this packet. + */ + uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset; + /* + * This is the size in bytes of the inner most L4 header. This + * can be subtracted from the payload_offset to determine the + * start of the inner most L4 header. + */ + /* + * This is the offset from the beginning of the packet in bytes + * for the outer L3 header. If there is no outer L3 header, then + * this value is zero. + */ + #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff) + #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0 + /* + * This is the offset from the beginning of the packet in bytes + * for the inner most L2 header. + */ + #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00) + #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9 + /* + * This is the offset from the beginning of the packet in bytes + * for the inner most L3 header. + */ + #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000) + #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18 + /* + * This is the size in bytes of the inner most L4 header. This + * can be subtracted from the payload_offset to determine the + * start of the inner most L4 header. + */ + #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000) + #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27 +} __attribute__((packed)); + +/* RX TPA End Completion Record (32 bytes split to 2 16-byte struct) */ +struct rx_tpa_end_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. By + * convention, the LSB identifies the length of the record in + * 16B units. Even values indicate 16B records. Odd values + * indicate 32B records. + */ + #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_END_CMPL_TYPE_SFT 0 + /* + * RX L2 TPA End Completion: Completion at the + * end of a TPA operation. Length = 32B + */ + #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15) + /* + * When this bit is '1', it indicates a packet that has an error + * of some type. Type of error is indicated in error_flags. + */ + #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Jumbo: TPA Packet was placed using jumbo + * algorithm. This means that the first buffer + * will be filled with data before moving to + * aggregation buffers. Each aggregation buffer + * will be filled before moving to the next + * aggregation buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: Packet was placed + * using Header/Data separation algorithm. 
The + * separation location is indicated by the itype + * field. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) + /* + * GRO/Jumbo: Packet will be placed using + * GRO/Jumbo where the first packet is filled + * with data. Subsequent packets will be placed + * such that any one packet does not span two + * aggregation buffers unless it starts at the + * beginning of an aggregation buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO (UINT32_C(0x5) << 7) + /* + * GRO/Header-Data Separation: Packet will be + * placed using GRO/HDS where the header is in + * the first packet. Payload of each packet will + * be placed such that any one packet does not + * span two aggregation buffers unless it starts + * at the beginning of an aggregation buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* unused is 2 b */ + #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00) + #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10 + /* + * This value indicates what the inner packet determined for the + * packet was. - 2 TCP Packet Indicates that the packet was IP + * and TCP. This indicates that the ip_cs field is valid and + * that the tcp_udp_cs field is valid and contains the TCP + * checksum. This also indicates that the payload_offset field + * is valid. + */ + #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12 + #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_END_CMPL_FLAGS_SFT 6 + uint16_t len; + /* + * This value is zero for TPA End completions. There is no data + * in the buffer that corresponds to the opaque value in this + * completion. + */ + uint32_t opaque; + /* + * This is a copy of the opaque field from the RX BD this + * completion corresponds to. + */ + uint8_t agg_bufs_v1; + /* unused1 is 1 b */ + /* + * This value is written by the NIC such that it will be + * different for each pass through the completion queue. The + * even passes will write 1. The odd passes will write 0. + */ + #define RX_TPA_END_CMPL_V1 UINT32_C(0x1) + /* + * This value is the number of aggregation buffers that follow + * this entry in the completion ring that are a part of this + * aggregation packet. If the value is zero, then the packet is + * completely contained in the buffer space provided in the + * aggregation start completion. + */ + #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e) + #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1 + /* unused1 is 1 b */ + uint8_t tpa_segs; + /* This value is the number of segments in the TPA operation. */ + uint8_t payload_offset; + /* + * This value indicates the offset in bytes from the beginning + * of the packet where the inner payload starts. This value is + * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero + * indicates an offset of 256 bytes. + */ + uint8_t agg_id; + /* + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. + */ + /* unused2 is 1 b */ + /* + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. 
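+	 *
+	 * A hedged correlation sketch; tpa_end is a hypothetical
+	 * pointer to this structure, and tpa_info is a hypothetical
+	 * per-ring array of saved start completions indexed by this ID:
+	 *
+	 *   uint8_t id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
+	 *                >> RX_TPA_END_CMPL_AGG_ID_SFT;
+	 *   struct rx_tpa_start_cmpl *start = tpa_info[id];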
+	 */
+	#define RX_TPA_END_CMPL_AGG_ID_MASK	UINT32_C(0xfe)
+	#define RX_TPA_END_CMPL_AGG_ID_SFT	1
+	uint32_t tsdelta;
+	/*
+	 * For non-GRO packets, this value is the timestamp delta
+	 * between earliest and latest timestamp values for TPA packet.
+	 * If packets were not time stamped, then delta will be zero.
+	 * For GRO packets, this field is zero except for the following
+	 * sub-fields. - tsdelta[31] Timestamp present indication. When
+	 * '0', no Timestamp option is in the packet. When '1', then a
+	 * Timestamp option is present in the packet.
+	 */
+} __attribute__((packed));
+
+/* last 16 bytes of RX TPA End Completion Record */
+struct rx_tpa_end_cmpl_hi {
+	uint32_t tpa_dup_acks;
+	/* unused3 is 28 b */
+	/*
+	 * This value is the number of duplicate ACKs that have been
+	 * received as part of the TPA operation.
+	 */
+	#define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK	UINT32_C(0xf)
+	#define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT	0
+	/* unused3 is 28 b */
+	uint16_t tpa_seg_len;
+	/*
+	 * This value is valid when the TPA completion is active. It
+	 * indicates the length of the longest segment of the TPA
+	 * operation for LRO mode and the length of the first segment in
+	 * GRO mode. This value may be used by GRO software to re-
+	 * construct the original packet stream from the TPA packet.
+	 * This is the length of all but the last segment for GRO. In
+	 * LRO mode this value may be used to indicate MSS size to the
+	 * stack.
+	 */
+	uint16_t unused_3;
+	/* unused4 is 16 b */
+	uint16_t errors_v2;
+	/*
+	 * This value is written by the NIC such that it will be
+	 * different for each pass through the completion queue. The
+	 * even passes will write 1. The odd passes will write 0.
+	 */
+	#define RX_TPA_END_CMPL_V2	UINT32_C(0x1)
+	/*
+	 * This error indicates that there was some sort of problem with
+	 * the BDs for the packet that was found after part of the
+	 * packet was already placed. The packet should be treated as
+	 * invalid.
+	 */
+	#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK	UINT32_C(0xe)
+	#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT	1
+	/*
+	 * This error occurs when there is a fatal HW
+	 * problem in the chip only. It indicates that
+	 * there were no BDs on chip but that there was
+	 * adequate reservation provided by the TPA
+	 * block.
+	 */
+	#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+		(UINT32_C(0x2) << 1)
+	/*
+	 * This error occurs when TPA block was not
+	 * configured to reserve adequate BDs for TPA
+	 * operations on this RX ring. All data for the
+	 * TPA operation was not placed. This error can
+	 * also be generated when the number of segments
+	 * is not programmed correctly in TPA and the 33
+	 * total aggregation buffers allowed for the TPA
+	 * operation has been exceeded.
+	 */
+	#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \
+		(UINT32_C(0x4) << 1)
+	#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \
+		RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR
+	#define RX_TPA_END_CMPL_ERRORS_MASK	UINT32_C(0xfffe)
+	#define RX_TPA_END_CMPL_ERRORS_SFT	1
+	uint16_t unused_4;
+	/* unused5 is 16 b */
+	uint32_t start_opaque;
+	/*
+	 * This is the opaque value that was completed for the TPA start
+	 * completion that corresponds to this TPA end completion.
+	 */
+} __attribute__((packed));
+
+/* HWRM Forwarded Request (16 bytes) */
 struct hwrm_fwd_req_cmpl {
 	uint16_t req_len_type;
 	/* Length of forwarded request in bytes. 
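 *
 * A hedged sketch; fwd is a hypothetical pointer to this
 * structure, and the HWRM_FWD_REQ_CMPL_REQ_LEN_MASK/_SFT pair is
 * assumed from the header's usual mask/shift naming convention:
 *
 *   uint16_t len = (rte_le_to_cpu_16(fwd->req_len_type) &
 *                   HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
 *                  HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;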
*/ @@ -1188,7 +1747,7 @@ struct hwrm_fwd_req_cmpl { #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 } __attribute__((packed)); -/* HWRM Asynchronous Event Completion Record (16 bytes) */ +/* HWRM Asynchronous Event Completion Record (16 bytes) */ struct hwrm_async_event_cmpl { uint16_t type; /* unused1 is 10 b */ @@ -1210,19 +1769,20 @@ struct hwrm_async_event_cmpl { /* Link MTU changed */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE UINT32_C(0x1) /* Link speed changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2) /* DCB Configuration changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3) /* Port connection not allowed */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED UINT32_C(0x4) /* Link speed configuration was not allowed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED UINT32_C(0x5) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ + UINT32_C(0x5) /* Link speed configuration change */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE UINT32_C(0x6) /* Port PHY configuration change */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE UINT32_C(0x7) /* Function driver unloaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10) /* Function driver loaded */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD UINT32_C(0x11) /* Function FLR related processing has completed */ @@ -1231,12 +1791,13 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD UINT32_C(0x20) /* PF driver loaded */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD UINT32_C(0x21) - /* VF Function Level Reset (FLR) */ + /* VF Function Level Reset (FLR) */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR UINT32_C(0x30) /* VF MAC Address Change */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE UINT32_C(0x31) /* PF-VF communication channel status change. */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE UINT32_C(0x32) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ + UINT32_C(0x32) /* VF Configuration Change */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33) /* HWRM Error */ @@ -1255,69 +1816,13 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe) #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 uint8_t timestamp_lo; - /* 8-lsb timestamp from POR (100-msec resolution) */ + /* 8-lsb timestamp from POR (100-msec resolution) */ uint16_t timestamp_hi; - /* 16-lsb timestamp from POR (100-msec resolution) */ + /* 16-lsb timestamp from POR (100-msec resolution) */ uint32_t event_data1; /* Event specific data */ } __attribute__((packed)); -/* - * Note: The Hardware Resource Manager (HWRM) manages various hardware resources - * inside the chip. The HWRM is implemented in firmware, and runs on embedded - * processors inside the chip. This firmware service is vital part of the chip. - * The chip can not be used by a driver or HWRM client without the HWRM. - */ - -/* Input (16 bytes) */ -struct input { - uint16_t req_type; - /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. 
- */ - uint16_t cmpl_ring; - /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. - */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; - /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM - */ - uint64_t resp_addr; - /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. - */ -} __attribute__((packed)); - -/* Output (8 bytes) */ -struct output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ -} __attribute__((packed)); - /* hwrm_ver_get */ /* * Description: This function is called by a driver to determine the HWRM @@ -1327,7 +1832,7 @@ struct output { * interface or firmware version with major = 0, minor = 0, and update = 0 shall * be considered an invalid version. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_ver_get_input { uint16_t req_type; /* @@ -1384,7 +1889,7 @@ struct hwrm_ver_get_input { uint8_t unused_0[5]; } __attribute__((packed)); -/* Output (128 bytes) */ +/* Output (128 bytes) */ struct hwrm_ver_get_output { uint16_t error_code; /* @@ -1454,7 +1959,7 @@ struct hwrm_ver_get_output { /* * This field is a reserved field. This field can be used to * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version of the HWRM + * tied to a specific (major,minor,update) version of the HWRM * firmware. */ uint8_t mgmt_fw_maj; @@ -1477,7 +1982,7 @@ struct hwrm_ver_get_output { /* * This field is a reserved field. This field can be used to * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version + * tied to a specific (major,minor,update) version */ uint8_t netctrl_fw_maj; /* @@ -1500,7 +2005,7 @@ struct hwrm_ver_get_output { /* * This field is a reserved field. This field can be used to * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version + * tied to a specific (major,minor,update) version */ uint32_t dev_caps_cfg; /* @@ -1512,13 +2017,27 @@ struct hwrm_ver_get_output { * supported. If set to 0, then secure firmware update behavior * is not supported. */ - #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED UINT32_C(0x1) + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \ + UINT32_C(0x1) /* * If set to 1, then firmware based DCBX agent is supported. If * set to 0, then firmware based DCBX agent capability is not * supported on this device. 
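 *
 * A minimal, hedged sketch of consuming these bits; resp is a
 * hypothetical pointer to a completed struct hwrm_ver_get_output:
 *
 *   uint32_t caps = rte_le_to_cpu_32(resp->dev_caps_cfg);
 *   int dcbx = !!(caps &
 *       HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED);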
*/ - #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED UINT32_C(0x2) + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, then HWRM short command format is supported. If + * set to 0, then HWRM short command format is not supported. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, then HWRM short command format is required. If + * set to 0, then HWRM short command format is not required. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED \ + UINT32_C(0x8) uint8_t roce_fw_maj; /* * This field represents the major version of RoCE firmware. A @@ -1539,22 +2058,22 @@ struct hwrm_ver_get_output { /* * This field is a reserved field. This field can be used to * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version + * tied to a specific (major,minor,update) version */ char hwrm_fw_name[16]; /* - * This field represents the name of HWRM FW (ASCII chars with + * This field represents the name of HWRM FW (ASCII chars with * NULL at the end). */ char mgmt_fw_name[16]; /* - * This field represents the name of mgmt FW (ASCII chars with + * This field represents the name of mgmt FW (ASCII chars with * NULL at the end). */ char netctrl_fw_name[16]; /* * This field represents the name of network control firmware - * (ASCII chars with NULL at the end). + * (ASCII chars with NULL at the end). */ uint32_t reserved2[4]; /* @@ -1563,7 +2082,7 @@ struct hwrm_ver_get_output { */ char roce_fw_name[16]; /* - * This field represents the name of RoCE FW (ASCII chars with + * This field represents the name of RoCE FW (ASCII chars with * NULL at the end). */ uint16_t chip_num; @@ -1584,7 +2103,7 @@ struct hwrm_ver_get_output { /* FPGA platform of the chip. */ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1) /* Palladium platform of the chip. */ - #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2) + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2) uint16_t max_req_win_len; /* * This field returns the maximum value of request window that @@ -1614,7 +2133,7 @@ struct hwrm_ver_get_output { /* hwrm_func_reset */ /* - * Description: This command resets a hardware function (PCIe function) and + * Description: This command resets a hardware function (PCIe function) and * frees any resources used by the function. This command shall be initiated by * the driver after an FLR has occurred to prepare the function for re-use. This * command may also be initiated by a driver prior to doing it's own @@ -1626,7 +2145,7 @@ struct hwrm_ver_get_output { * idled. The command returns all the resources owned by the function so a new * driver may allocate and configure resources normally. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_func_reset_input { uint16_t req_type; /* @@ -1668,7 +2187,7 @@ struct hwrm_func_reset_input { /* This value indicates the level of a function reset. */ /* * Reset the caller function and its children - * VFs (if any). If no children functions exist, + * VFs (if any). If no children functions exist, * then reset the caller function only. */ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL UINT32_C(0x0) @@ -1681,7 +2200,8 @@ struct hwrm_func_reset_input { * It is an error to specify this level by a PF * driver with no children VFs. 
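 *
 * A hedged request sketch; req is a hypothetical, zeroed struct
 * hwrm_func_reset_input, with the level field named as in this
 * header:
 *
 *   req.func_reset_level =
 *       HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL;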
*/ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN UINT32_C(0x2) + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \ + UINT32_C(0x2) /* * Reset a specific VF of the caller function * driver if the caller is the parent PF driver. @@ -1694,7 +2214,7 @@ struct hwrm_func_reset_input { uint8_t unused_0; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_func_reset_output { uint16_t error_code; /* @@ -1734,7 +2254,7 @@ struct hwrm_func_reset_output { * physical function. The output FID value is needed to configure Rings and * MSI-X vectors so their DMA operations appear correctly on the PCI bus. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_func_qcaps_input { uint16_t req_type; /* @@ -1765,12 +2285,12 @@ struct hwrm_func_qcaps_input { uint16_t fid; /* * Function ID of the function that is being queried. 0xFF... - * (All Fs) if the query is for the requesting function. + * (All Fs) if the query is for the requesting function. */ uint16_t unused_0[3]; } __attribute__((packed)); -/* Output (80 bytes) */ +/* Output (80 bytes) */ struct hwrm_func_qcaps_output { uint16_t error_code; /* @@ -1795,54 +2315,56 @@ struct hwrm_func_qcaps_output { uint16_t port_id; /* * Port ID of port that this function is associated with. Valid - * only for the PF. 0xFF... (All Fs) if this function is not - * associated with any port. 0xFF... (All Fs) if this function + * only for the PF. 0xFF... (All Fs) if this function is not + * associated with any port. 0xFF... (All Fs) if this function * is called from a VF. */ uint32_t flags; /* If 1, then Push mode is supported on this function. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1) /* * If 1, then the global MSI-X auto-masking is enabled for the * device. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING UINT32_C(0x2) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \ + UINT32_C(0x2) /* - * If 1, then the Precision Time Protocol (PTP) processing is + * If 1, then the Precision Time Protocol (PTP) processing is * supported on this function. The HWRM should enable PTP on - * only a single Physical Function (PF) per port. + * only a single Physical Function (PF) per port. */ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4) /* - * If 1, then RDMA over Converged Ethernet (RoCE) v1 is + * If 1, then RDMA over Converged Ethernet (RoCE) v1 is * supported on this function. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8) /* - * If 1, then RDMA over Converged Ethernet (RoCE) v2 is + * If 1, then RDMA over Converged Ethernet (RoCE) v2 is * supported on this function. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10) /* * If 1, then control and configuration of WoL magic packet are * supported on this function. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED UINT32_C(0x20) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \ + UINT32_C(0x20) /* * If 1, then control and configuration of bitmap pattern packet * are supported on this function. 
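 *
 * A minimal, hedged sketch of testing these capability bits; resp
 * is a hypothetical pointer to a completed struct
 * hwrm_func_qcaps_output:
 *
 *   uint32_t fl = rte_le_to_cpu_32(resp->flags);
 *   int wol = !!(fl &
 *       HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED);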
*/ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40) /* * If set to 1, then the control and configuration of rate limit * of an allocated TX ring on the queried function is supported. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80) /* * If 1, then control and configuration of minimum and maximum * bandwidths are supported on the queried function. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100) /* * If the query is for a VF, then this flag shall be ignored. If * this query is for a PF and this flag is set to 1, then the PF @@ -1851,7 +2373,8 @@ struct hwrm_func_qcaps_output { * set to 0, then the PF does not have the capability to set the * rate limits on the TX rings of its children VFs. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED UINT32_C(0x200) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \ + UINT32_C(0x200) /* * If the query is for a VF, then this flag shall be ignored. If * this query is for a PF and this flag is set to 1, then the PF @@ -1861,7 +2384,17 @@ struct hwrm_func_qcaps_output { * capability to set the minimum or maximum bandwidths for its * children VFs. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400) + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. If + * set to 1, then standard TX ring mode is supported on the + * queried function. If set to 0, then standard TX ring mode is + * not available on the queried function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \ + UINT32_C(0x800) uint8_t mac_address[6]; /* * This value is current MAC address configured for this @@ -1901,7 +2434,7 @@ struct hwrm_func_qcaps_output { uint16_t first_vf_id; /* * The identifier for the first VF enabled on a PF. This is - * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if + * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if * this command is called on a PF with SR-IOV disabled or on a * VF. */ @@ -1909,7 +2442,7 @@ struct hwrm_func_qcaps_output { /* * The maximum number of VFs that can be allocated to the * function. This is valid only on the PF with SR-IOV enabled. - * 0xFF... (All Fs) if this command is called on a PF with SR- + * 0xFF... (All Fs) if this command is called on a PF with SR- * IOV disabled or on a VF. */ uint16_t max_stat_ctx; @@ -1929,22 +2462,22 @@ struct hwrm_func_qcaps_output { */ uint32_t max_tx_em_flows; /* - * The maximum number of Exact Match (EM) flows that can be + * The maximum number of Exact Match (EM) flows that can be * offloaded by this function on the TX side. */ uint32_t max_tx_wm_flows; /* - * The maximum number of Wildcard Match (WM) flows that can be + * The maximum number of Wildcard Match (WM) flows that can be * offloaded by this function on the TX side. */ uint32_t max_rx_em_flows; /* - * The maximum number of Exact Match (EM) flows that can be + * The maximum number of Exact Match (EM) flows that can be * offloaded by this function on the RX side. 
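The flags word above is little-endian on the wire like every other multi-byte field in these messages, so a capability check is a byte swap plus a mask test. An illustrative fragment, assuming 'resp' points at a completed hwrm_func_qcaps response; the two driver hooks are hypothetical:

    uint32_t flags = rte_le_to_cpu_32(resp->flags);

    if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED)
        bnxt_enable_ptp(bp);             /* hypothetical driver hook */
    if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED)
        bnxt_allow_vf_tx_rate(bp);       /* hypothetical driver hook */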
*/ uint32_t max_rx_wm_flows; /* - * The maximum number of Wildcard Match (WM) flows that can be + * The maximum number of Wildcard Match (WM) flows that can be * offloaded by this function on the RX side. */ uint32_t max_mcast_filters; @@ -1968,7 +2501,7 @@ struct hwrm_func_qcaps_output { * be allocated to the function. This number indicates the * maximum number of TX rings that can be assigned strict * priorities out of the maximum number of TX rings that can be - * allocated (max_tx_rings) to the function. + * allocated (max_tx_rings) to the function. */ uint8_t unused_0; uint8_t valid; @@ -1989,9 +2522,13 @@ struct hwrm_func_qcaps_output { * allows a physical function driver to query virtual functions that are * children of the physical function. The output FID value is needed to * configure Rings and MSI-X vectors so their DMA operations appear correctly on - * the PCI bus. + * the PCI bus. This command should be called by every driver after + * 'hwrm_func_cfg' to get the actual number of resources allocated by the HWRM. + * The values returned by hwrm_func_qcfg are the values the driver shall use. + * These values may be different than what was originally requested in the + * 'hwrm_func_cfg' command. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_func_qcfg_input { uint16_t req_type; /* @@ -2022,12 +2559,12 @@ struct hwrm_func_qcfg_input { uint16_t fid; /* * Function ID of the function that is being queried. 0xFF... - * (All Fs) if the query is for the requesting function. + * (All Fs) if the query is for the requesting function. */ uint16_t unused_0[3]; } __attribute__((packed)); -/* Output (72 bytes) */ +/* Output (72 bytes) */ struct hwrm_func_qcfg_output { uint16_t error_code; /* @@ -2052,7 +2589,7 @@ struct hwrm_func_qcfg_output { uint16_t port_id; /* * Port ID of port that this function is associated with. - * 0xFF... (All Fs) if this function is not associated with any + * 0xFF... (All Fs) if this function is not associated with any * port. */ uint16_t vlan; @@ -2060,15 +2597,15 @@ struct hwrm_func_qcfg_output { * This value is the current VLAN setting for this function. The * value of 0 for this field indicates no priority tagging or * VLAN is used. This field's format is same as 802.1Q Tag's Tag - * Control Information (TCI) format that includes both Priority - * Code Point (PCP) and VLAN Identifier (VID). + * Control Information (TCI) format that includes both Priority + * Code Point (PCP) and VLAN Identifier (VID). */ uint16_t flags; /* * If 1, then magic packet based Out-Of-Box WoL is enabled on * the port associated with this function. */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \ UINT32_C(0x1) /* * If 1, then bitmap pattern based Out-Of-Box WoL packet is @@ -2080,8 +2617,33 @@ struct hwrm_func_qcfg_output { * on the port associated with this function. If set to 0, then * DCBX agent is not running in the firmware. */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \ UINT32_C(0x4) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. If + * set to 1, then standard TX ring mode is enabled on the + * queried function. If set to 0, then the standard TX ring mode + * is disabled on the queried function. 
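The vlan field above reuses the 802.1Q TCI layout, so splitting it into its parts is plain bit arithmetic. A sketch (the DEI bit, bit 12, is ignored here):

    uint16_t tci = rte_le_to_cpu_16(resp->vlan);
    uint8_t  pcp = (tci >> 13) & 0x7;   /* Priority Code Point, top 3 bits */
    uint16_t vid = tci & 0xfff;         /* VLAN Identifier, low 12 bits */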
In this extended TX ring + * resource mode, the minimum and maximum bandwidth settings are + * not supported to allow the allocation of TX rings to span + * multiple scheduler nodes. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \ + UINT32_C(0x8) + /* + * If set to 1 then FW based LLDP agent is enabled and running + * on the port associated with this function. If set to 0 then + * the LLDP agent is not running in the firmware. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED UINT32_C(0x10) + /* + * If set to 1, then multi-host mode is active for this + * function. If set to 0, then multi-host mode is inactive for + * this function or not applicable for this device. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST UINT32_C(0x20) uint8_t mac_address[6]; /* * This value is current MAC address configured for this @@ -2091,8 +2653,8 @@ struct hwrm_func_qcfg_output { uint16_t pci_id; /* * This value is current PCI ID of this function. If ARI is - * enabled, then it is Bus Number (8b):Function Number(8b). - * Otherwise, it is Bus Number (8b):Device Number (4b):Function + * enabled, then it is Bus Number (8b):Function Number(8b). + * Otherwise, it is Bus Number (8b):Device Number (4b):Function * Number(4b). */ uint16_t alloc_rsscos_ctx; @@ -2146,22 +2708,27 @@ struct hwrm_func_qcfg_output { /* Multiple physical functions */ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1) /* Network Partitioning 1.0 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 \ - UINT32_C(0x2) + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2) /* Network Partitioning 1.5 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 \ - UINT32_C(0x3) + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3) /* Network Partitioning 2.0 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 \ - UINT32_C(0x4) + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4) /* Unknown */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \ - UINT32_C(0xff) - uint8_t unused_0; + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN UINT32_C(0xff) + uint8_t port_pf_cnt; + /* + * This field will indicate number of physical functions on this + * port_partition. HWRM shall return unavail (i.e. value of 0) + * for this field when this command is used to query VF's + * configuration or from older firmware that doesn't support + * this field. + */ + /* number of PFs is not available */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0) uint16_t dflt_vnic_id; /* The default VNIC ID assigned to a function that is being queried. */ + uint8_t unused_0; uint8_t unused_1; - uint8_t unused_2; uint32_t min_bw; /* * Minimum BW allocated for this function. The HWRM will @@ -2169,26 +2736,41 @@ struct hwrm_func_qcfg_output { * for the scheduler inside the device. A value of 0 indicates * the minimum bandwidth is not configured. */ - /* Bandwidth value */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \ - UINT32_C(0xfffffff) + /* The bandwidth value. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff) #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0 - /* Reserved */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_RSVD UINT32_C(0x10000000) + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \ + FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES /* bw_value_unit is 3 b */ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mbps */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MBPS \ + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \ FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID uint32_t max_bw; /* @@ -2197,43 +2779,58 @@ struct hwrm_func_qcfg_output { * for the scheduler inside the device. A value of 0 indicates * that the maximum bandwidth is not configured. */ - /* Bandwidth value */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \ - UINT32_C(0xfffffff) + /* The bandwidth value. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff) #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0 - /* Reserved */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_RSVD UINT32_C(0x10000000) + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \ + FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES /* bw_value_unit is 3 b */ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mbps */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MBPS \ + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. 
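The min_bw and max_bw words above pack three fields into 32 bits: a 28-bit value, a scale bit (bits vs. bytes) at bit 28, and a 3-bit unit at bits 29-31. For example, "1 gigabit" encodes as a value of 1 with the BITS scale and GIGA unit; before being placed in a request the word would still need rte_cpu_to_le_32(). A sketch using the min_bw names defined above:

    uint32_t min_bw = (1 & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK) |
                      HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS |
                      HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA;  /* 1 Gb */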
*/ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \ FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID uint8_t evb_mode; /* * This value indicates the Edge virtual bridge mode for the * domain that this function belongs to. */ - /* No Edge Virtual Bridging (EVB) */ + /* No Edge Virtual Bridging (EVB) */ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0) - /* Virtual Ethernet Bridge (VEB) */ + /* Virtual Ethernet Bridge (VEB) */ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1) - /* Virtual Ethernet Port Aggregator (VEPA) */ + /* Virtual Ethernet Port Aggregator (VEPA) */ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2) - uint8_t unused_3; + uint8_t unused_2; uint16_t alloc_vfs; /* * The number of VFs that are allocated to the function. This is - * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if + * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if * this command is called on a PF with SR-IOV disabled or on a * VF. */ @@ -2247,9 +2844,957 @@ struct hwrm_func_qcfg_output { uint16_t alloc_sp_tx_rings; /* * The number of strict priority transmit rings out of currently - * allocated TX rings to the function (alloc_tx_rings). + * allocated TX rings to the function (alloc_tx_rings). + */ + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_func_vlan_qcfg */ +/* + * Description: This command should be called by the PF driver to get the current + * C-TAG, S-TAG and corresponding PCP and TPID values configured for the + * function. + */ +/* Input (24 bytes) */ +struct hwrm_func_vlan_qcfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t fid; + /* + * Function ID of the function that is being configured. If set + * to 0xFF... (All Fs), then the configuration is for the + * requesting function.
*/ + uint16_t unused_0[3]; +}; + +/* Output (40 bytes) */ +struct hwrm_func_vlan_qcfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ + uint16_t stag_vid; + /* S-TAG VLAN identifier configured for the function. */ + uint8_t stag_pcp; + /* S-TAG PCP value configured for the function. */ uint8_t unused_4; + uint16_t stag_tpid; + /* + * S-TAG TPID value configured for the function. This field is + * specified in network byte order. + */ + uint16_t ctag_vid; + /* C-TAG VLAN identifier configured for the function. */ + uint8_t ctag_pcp; + /* C-TAG PCP value configured for the function. */ + uint8_t unused_5; + uint16_t ctag_tpid; + /* + * C-TAG TPID value configured for the function. This field is + * specified in network byte order. + */ + uint32_t rsvd2; + /* Future use. */ + uint32_t rsvd3; + /* Future use. */ + uint32_t unused_6; +}; + +/* hwrm_func_vlan_cfg */ +/* + * Description: This command allows PF driver to configure C-TAG, S-TAG and + * corresponding PCP and TPID values for a function. + */ +/* Input (48 bytes) */ +struct hwrm_func_vlan_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t fid; + /* + * Function ID of the function that is being configured. If set + * to 0xFF... (All Fs), then the configuration is for the + * requesting function. + */ + uint8_t unused_0; + uint8_t unused_1; + uint32_t enables; + /* This bit must be '1' for the stag_vid field to be configured. */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1) + /* This bit must be '1' for the ctag_vid field to be configured. */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2) + /* This bit must be '1' for the stag_pcp field to be configured. 
*/ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4) + /* This bit must be '1' for the ctag_pcp field to be configured. */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8) + /* This bit must be '1' for the stag_tpid field to be configured. */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10) + /* This bit must be '1' for the ctag_tpid field to be configured. */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20) + uint16_t stag_vid; + /* S-TAG VLAN identifier configured for the function. */ + uint8_t stag_pcp; + /* S-TAG PCP value configured for the function. */ + uint8_t unused_2; + uint16_t stag_tpid; + /* + * S-TAG TPID value configured for the function. This field is + * specified in network byte order. + */ + uint16_t ctag_vid; + /* C-TAG VLAN identifier configured for the function. */ + uint8_t ctag_pcp; + /* C-TAG PCP value configured for the function. */ + uint8_t unused_3; + uint16_t ctag_tpid; + /* + * C-TAG TPID value configured for the function. This field is + * specified in network byte order. + */ + uint32_t rsvd1; + /* Future use. */ + uint32_t rsvd2; + /* Future use. */ + uint32_t unused_4; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vlan_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +}; + +/* hwrm_func_cfg */ +/* + * Description: This command allows configuration of a PF by the corresponding + * PF driver. This command also allows configuration of a child VF by its parent + * PF driver. The input FID value is used to indicate what function is being + * configured. This allows a PF driver to configure the PF owned by itself or a + * virtual function that is a child of the PF. This command allows a parent PF + * to reserve resources for a VF. To reverse the process, the command + * should be called with all enables flags cleared for resources. This will free + * allocated resources for the VF and return them to the resource pool. If this + * command is requested by a VF driver to configure or reserve resources, then + * the HWRM shall fail this command. If default MAC address and/or VLAN are + * provided in this command, then the HWRM shall set up appropriate MAC/VLAN + * filters for the function that is being configured. If source properties + * checks are enabled and default MAC address and/or IP address are provided in + * this command, then the HWRM shall set appropriate source property checks + * based on provided MAC and/or IP addresses. The parent PF driver should not + * set MTU/MRU for a VF using this command.
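One wrinkle in the hwrm_func_vlan_cfg message above before moving on: the TPID fields are specified in network byte order, while the rest of the message stays little-endian. A hypothetical request fragment asking for a C-TAG TPID of 0x8100 on the requesting function:

    struct hwrm_func_vlan_cfg_input req = { 0 };

    req.fid = rte_cpu_to_le_16(0xffff);     /* all Fs: the requesting function */
    req.enables = rte_cpu_to_le_32(HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID);
    req.ctag_tpid = rte_cpu_to_be_16(0x8100);   /* network byte order */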
This is to allow MTU/MRU setting by * the VF driver. If the MTU or MRU for a VF is set by the PF driver, then the * HWRM should ignore it. A function's MTU/MRU should be set prior to allocating * RX VNICs or TX rings. A PF driver calls hwrm_func_cfg to allocate resources * for itself or its children VFs. All function drivers shall call hwrm_func_cfg * to reserve resources. A request to hwrm_func_cfg may not be fully granted; * that is, a request for resources may be larger than what can be supported by * the device and the HWRM will allocate the best set of resources available, * but that may be less than requested. If all the amounts requested could not * be fulfilled, the HWRM shall allocate what it could and return a status code * of success. A function driver should call hwrm_func_qcfg immediately after * hwrm_func_cfg to determine what resources were assigned to the configured * function. A call by a PF driver to hwrm_func_cfg to allocate resources for * itself shall only allocate resources for the PF driver to use, not for its * children VFs. Likewise, a call to hwrm_func_qcfg shall return the resources * available for the PF driver to use, not what is available to its children * VFs. + */ +/* Input (88 bytes) */ +struct hwrm_func_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t fid; + /* + * Function ID of the function that is being configured. If set + * to 0xFF... (All Fs), then the configuration is for the + * requesting function. + */ + uint8_t unused_0; + uint8_t unused_1; + uint32_t flags; + /* + * When this bit is '1', the function is disabled with source + * MAC address check. This is an anti-spoofing check. If this + * flag is set, then the function shall be configured to + * disallow transmission of frames with the source MAC address + * that is configured for this function. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \ + UINT32_C(0x1) + /* + * When this bit is '1', the function is enabled with source MAC + * address check. This is an anti-spoofing check. If this flag + * is set, then the function shall be configured to allow + * transmission of frames with the source MAC address that is + * configured for this function. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \ + UINT32_C(0x2) + /* reserved */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK UINT32_C(0x1fc) + #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2 + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function.
If + * set to 1, then standard TX ring mode is requested to be + * enabled on the function being configured. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \ + UINT32_C(0x200) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. If + * set to 1, then the standard TX ring mode is requested to be + * disabled on the function being configured. In this extended + * TX ring resource mode, the minimum and maximum bandwidth + * settings are not supported to allow the allocation of TX + * rings to span multiple scheduler nodes. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \ + UINT32_C(0x400) + /* + * If this bit is set, virtual mac address configured in this + * command will be persistent over warm boot. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST UINT32_C(0x800) + /* + * This bit only applies to the VF. If this bit is set, the + * statistic context counters will not be cleared when the + * statistic context is freed or a function reset is called on + * VF. This bit will be cleared when the PF is unloaded or a + * function reset is called on the PF. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \ + UINT32_C(0x1000) + uint32_t enables; + /* This bit must be '1' for the mtu field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1) + /* This bit must be '1' for the mru field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU UINT32_C(0x2) + /* + * This bit must be '1' for the num_rsscos_ctxs field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS UINT32_C(0x4) + /* + * This bit must be '1' for the num_cmpl_rings field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS UINT32_C(0x8) + /* This bit must be '1' for the num_tx_rings field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x10) + /* This bit must be '1' for the num_rx_rings field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x20) + /* This bit must be '1' for the num_l2_ctxs field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x40) + /* This bit must be '1' for the num_vnics field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS UINT32_C(0x80) + /* + * This bit must be '1' for the num_stat_ctxs field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x100) + /* + * This bit must be '1' for the dflt_mac_addr field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x200) + /* This bit must be '1' for the dflt_vlan field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN UINT32_C(0x400) + /* This bit must be '1' for the dflt_ip_addr field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR UINT32_C(0x800) + /* This bit must be '1' for the min_bw field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW UINT32_C(0x1000) + /* This bit must be '1' for the max_bw field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW UINT32_C(0x2000) + /* + * This bit must be '1' for the async_event_cr field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4000) + /* + * This bit must be '1' for the vlan_antispoof_mode field to be + * configured. 
+ */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE UINT32_C(0x8000) + /* + * This bit must be '1' for the allowed_vlan_pris field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS UINT32_C(0x10000) + /* This bit must be '1' for the evb_mode field to be configured. */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE UINT32_C(0x20000) + /* + * This bit must be '1' for the num_mcast_filters field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS UINT32_C(0x40000) + /* + * This bit must be '1' for the num_hw_ring_grps field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS UINT32_C(0x80000) + uint16_t mtu; + /* + * The maximum transmission unit of the function. The HWRM + * should make sure that the mtu of the function does not exceed + * the mtu of the physical port that this function is associated + * with. In addition to configuring mtu per function, it is + * possible to configure mtu per transmit ring. By default, the + * mtu of each transmit ring associated with a function is equal + * to the mtu of the function. The HWRM should make sure that + * the mtu of each transmit ring that is assigned to a function + * has a valid mtu. + */ + uint16_t mru; + /* + * The maximum receive unit of the function. The HWRM should + * make sure that the mru of the function does not exceed the + * mru of the physical port that this function is associated + * with. In addition to configuring mru per function, it is + * possible to configure mru per vnic. By default, the mru of + * each vnic associated with a function is equal to the mru of + * the function. The HWRM should make sure that the mru of each + * vnic that is assigned to a function has a valid mru. + */ + uint16_t num_rsscos_ctxs; + /* The number of RSS/COS contexts requested for the function. */ + uint16_t num_cmpl_rings; + /* + * The number of completion rings requested for the function. + * This does not include the rings allocated to any children + * functions if any. + */ + uint16_t num_tx_rings; + /* + * The number of transmit rings requested for the function. This + * does not include the rings allocated to any children + * functions if any. + */ + uint16_t num_rx_rings; + /* + * The number of receive rings requested for the function. This + * does not include the rings allocated to any children + * functions if any. + */ + uint16_t num_l2_ctxs; + /* The requested number of L2 contexts for the function. */ + uint16_t num_vnics; + /* The requested number of vnics for the function. */ + uint16_t num_stat_ctxs; + /* The requested number of statistic contexts for the function. */ + uint16_t num_hw_ring_grps; + /* + * The number of HW ring groups that should be reserved for this + * function. + */ + uint8_t dflt_mac_addr[6]; + /* The default MAC address for the function being configured. */ + uint16_t dflt_vlan; + /* + * The default VLAN for the function being configured. This + * field's format is same as 802.1Q Tag's Tag Control + * Information (TCI) format that includes both Priority Code + * Point (PCP) and VLAN Identifier (VID). + */ + uint32_t dflt_ip_addr[4]; + /* + * The default IP address for the function being configured. + * This address is only used in enabling source property check. + */ + uint32_t min_bw; + /* + * Minimum BW allocated for this function. The HWRM will + * translate this value into byte counter and time interval used + * for the scheduler inside the device. + */ + /* The bandwidth value. 
*/ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES (UINT32_C(0x1) << 28) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \ + FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \ + FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID + uint32_t max_bw; + /* + * Maximum BW allocated for this function. The HWRM will + * translate this value into byte counter and time interval used + * for the scheduler inside the device. + */ + /* The bandwidth value. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \ + FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID + uint16_t async_event_cr; + /* + * ID of the target completion ring for receiving asynchronous + * event completions. 
If this field is not valid, then the HWRM + * shall use the default completion ring of the function that is + * being configured as the target completion ring for providing + * any asynchronous event completions for that function. If this + * field is valid, then the HWRM shall use the completion ring + * identified by this ID as the target completion ring for + * providing any asynchronous event completions for the function + * that is being configured. + */ + uint8_t vlan_antispoof_mode; + /* VLAN Anti-spoofing mode. */ + /* No VLAN anti-spoofing checks are enabled */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK UINT32_C(0x0) + /* Validate VLAN against the configured VLAN(s) */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \ + UINT32_C(0x1) + /* Insert VLAN if it does not exist, otherwise discard */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \ + UINT32_C(0x2) + /* + * Insert VLAN if it does not exist, override + * VLAN if it exists + */ + #define \ + HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \ + UINT32_C(0x3) + uint8_t allowed_vlan_pris; + /* + * This bit field defines VLAN PRIs that are allowed on this + * function. If nth bit is set, then VLAN PRI n is allowed on + * this function. + */ + uint8_t evb_mode; + /* + * The HWRM shall allow a PF driver to change EVB mode for the + * partition it belongs to. The HWRM shall not allow a VF driver + * to change the EVB mode. The HWRM shall take into account the + * switching of EVB mode from one to another and reconfigure + * hardware resources as appropriate. The switching from VEB + * to VEPA mode requires disabling loopback traffic. + * Additionally, source knock outs are handled differently in + * VEB and VEPA modes. + */ + /* No Edge Virtual Bridging (EVB) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0) + /* Virtual Ethernet Bridge (VEB) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1) + /* Virtual Ethernet Port Aggregator (VEPA) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2) + uint8_t unused_2; + uint16_t num_mcast_filters; + /* + * The number of multicast filters that should be reserved for + * this function on the RX side. + */ +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_func_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_func_qstats */ +/* + * Description: This command returns statistics of a function. The input FID + * value is used to indicate what function is being queried.
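The allocation contract spelled out in the hwrm_func_cfg description above (a request may be trimmed yet still succeed) is why a qcfg read-back must follow every cfg. A sketch of that sequence; bnxt_hwrm_send() is a stand-in for the driver's message transport, and wanted_tx_rings is illustrative:

    struct hwrm_func_cfg_input cfg = { 0 };

    cfg.fid = rte_cpu_to_le_16(0xffff);     /* the requesting function */
    cfg.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS);
    cfg.num_tx_rings = rte_cpu_to_le_16(wanted_tx_rings);
    bnxt_hwrm_send(bp, &cfg, sizeof(cfg));  /* may be granted only in part */

    /* Trust only the read-back: use alloc_tx_rings from the subsequent
     * hwrm_func_qcfg response, not wanted_tx_rings. */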
This allows a + * physical function driver to query virtual functions that are children of the + * physical function. The HWRM shall return any unsupported counter with a value + * of 0xFFFFFFFF for 32-bit counters and 0xFFFFFFFFFFFFFFFF for 64-bit counters. + */ +/* Input (24 bytes) */ +struct hwrm_func_qstats_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t fid; + /* + * Function ID of the function that is being queried. 0xFF... + * (All Fs) if the query is for the requesting function. + */ + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (176 bytes) */ +struct hwrm_func_qstats_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint64_t tx_ucast_pkts; + /* Number of transmitted unicast packets on the function. */ + uint64_t tx_mcast_pkts; + /* Number of transmitted multicast packets on the function. */ + uint64_t tx_bcast_pkts; + /* Number of transmitted broadcast packets on the function. */ + uint64_t tx_err_pkts; + /* + * Number of transmitted packets that were discarded due to + * internal NIC resource problems. For transmit, this can only + * happen if TMP is configured to allow dropping in HOL blocking + * conditions, which is not a normal configuration. + */ + uint64_t tx_drop_pkts; + /* + * Number of dropped packets on transmit path on the function. + * These are packets that have been marked for drop by the TE + * CFA block or are packets that exceeded the transmit MTU limit + * for the function. + */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for unicast traffic on the function. */ + uint64_t tx_mcast_bytes; + /* + * Number of transmitted bytes for multicast traffic on the + * function. + */ + uint64_t tx_bcast_bytes; + /* + * Number of transmitted bytes for broadcast traffic on the + * function. + */ + uint64_t rx_ucast_pkts; + /* Number of received unicast packets on the function. */ + uint64_t rx_mcast_pkts; + /* Number of received multicast packets on the function. */ + uint64_t rx_bcast_pkts; + /* Number of received broadcast packets on the function. */ + uint64_t rx_err_pkts; + /* + * Number of received packets that were discarded on the + * function due to resource limitations. 
This can happen for 3 + * reasons. # The BD used for the packet has a bad format. # + * There were no BDs available in the ring for the packet. # + * There were no BDs available on-chip for the packet. + */ + uint64_t rx_drop_pkts; + /* + * Number of dropped packets on received path on the function. + * These are packets that have been marked for drop by the RE + * CFA. + */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for unicast traffic on the function. */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for multicast traffic on the function. */ + uint64_t rx_bcast_bytes; + /* Number of received bytes for broadcast traffic on the function. */ + uint64_t rx_agg_pkts; + /* Number of aggregated unicast packets on the function. */ + uint64_t rx_agg_bytes; + /* Number of aggregated unicast bytes on the function. */ + uint64_t rx_agg_events; + /* Number of aggregation events on the function. */ + uint64_t rx_agg_aborts; + /* Number of aborted aggregations on the function. */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_func_clr_stats */ +/* + * Description: This command clears statistics of a function. The input FID + * value is used to indicate what function's statistics is being cleared. This + * allows a physical function driver to clear statistics of virtual functions + * that are children of the physical function. + */ +/* Input (24 bytes) */ +struct hwrm_func_clr_stats_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t fid; + /* + * Function ID of the function. 0xFF... (All Fs) if the query is + * for the requesting function. + */ + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_func_clr_stats_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. 
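Per the hwrm_func_qstats description above, counters the device does not implement read back as all-ones rather than zero, so a consumer aggregating statistics may want to normalize that sentinel before summing. A minimal sketch:

    static uint64_t qstat64(uint64_t raw)
    {
        /* all-ones marks an unsupported counter */
        return (raw == UINT64_MAX) ? 0 : rte_le_to_cpu_64(raw);
    }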
+ */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_func_vf_vnic_ids_query */ +/* Description: This command is used to query vf vnic ids. */ +/* Input (32 bytes) */ +struct hwrm_func_vf_vnic_ids_query_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t vf_id; + /* + * This value is used to identify a Virtual Function (VF). The + * scope of VF ID is local within a PF. + */ + uint8_t unused_0; + uint8_t unused_1; + uint32_t max_vnic_id_cnt; + /* Max number of vnic ids in vnic id table */ + uint64_t vnic_id_tbl_addr; + /* This is the address for VF VNIC ID table */ +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_func_vf_vnic_ids_query_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t vnic_id_cnt; + /* + * Actual number of vnic ids Each VNIC ID is written as a 32-bit + * number. + */ + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; uint8_t valid; /* * This field is used in Output records to indicate that the @@ -2268,7 +3813,7 @@ struct hwrm_func_qcfg_output { * function driver shall use this command during the driver initialization right * after the HWRM version discovery and default ring resources allocation. */ -/* Input (80 bytes) */ +/* Input (80 bytes) */ struct hwrm_func_drv_rgtr_input { uint16_t req_type; /* @@ -2324,9 +3869,12 @@ struct hwrm_func_drv_rgtr_input { * This bit must be '1' for the async_event_fwd field to be * configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10) + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10) uint16_t os_type; - /* This value indicates the type of OS. */ + /* + * This value indicates the type of OS. 
The values are based on + * CIM_OperatingSystem.mof file as published by the DMTF. + */ /* Unknown */ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) /* Other OS not listed below. */ @@ -2374,17 +3922,17 @@ struct hwrm_func_drv_rgtr_input { uint32_t async_event_fwd[8]; /* * This is a 256-bit bit mask provided by the function driver - * (PF or VF driver) to indicate the list of asynchronous event + * (PF or VF driver) to indicate the list of asynchronous event * completions to be forwarded. Nth bit refers to the Nth * event_id. Setting Nth bit to 1 by the function driver shall * result in the HWRM forwarding asynchronous event completion - * with event_id equal to N. If all bits are set to 0 (value of + * with event_id equal to N. If all bits are set to 0 (value of * 0), then the HWRM shall not forward any asynchronous event * completion to this function driver. */ } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_func_drv_rgtr_output { uint16_t error_code; /* @@ -2422,7 +3970,7 @@ struct hwrm_func_drv_rgtr_output { * the HWRM. A function driver shall implement this command. A function driver * shall use this command during the driver unloading. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_func_drv_unrgtr_input { uint16_t req_type; /* @@ -2455,11 +4003,12 @@ struct hwrm_func_drv_unrgtr_input { * When this bit is '1', the function driver is notifying the * HWRM to prepare for the shutdown. */ - #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN UINT32_C(0x1) + #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \ + UINT32_C(0x1) uint32_t unused_0; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_func_drv_unrgtr_output { uint16_t error_code; /* @@ -2491,6 +4040,319 @@ struct hwrm_func_drv_unrgtr_output { */ } __attribute__((packed)); +/* hwrm_func_buf_rgtr */ +/* + * Description: This command is used by the PF driver to register buffers used + * in the PF-VF communication with the HWRM. The PF driver uses this command to + * register buffers for each PF-VF channel. A parent PF may issue this command + * per child VF. If VF ID is not valid, then this command is used to register + * buffers for all children VFs of the PF. + */ +/* Input (128 bytes) */ +struct hwrm_func_buf_rgtr_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t enables; + /* This bit must be '1' for the vf_id field to be configured. */ + #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) + /* This bit must be '1' for the err_buf_addr field to be configured. 
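The async_event_fwd[8] array in hwrm_func_drv_rgtr above is a 256-bit bitmap in which bit N enables forwarding of event_id N; assuming each 32-bit word is carried little-endian like the rest of the message, enabling one event looks like:

    static void drv_rgtr_fwd_event(struct hwrm_func_drv_rgtr_input *req,
                                   unsigned int event_id)
    {
        req->async_event_fwd[event_id / 32] |=
            rte_cpu_to_le_32(UINT32_C(1) << (event_id % 32));
    }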
*/ + #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2) + uint16_t vf_id; + /* + * This value is used to identify a Virtual Function (VF). The + * scope of VF ID is local within a PF. + */ + uint16_t req_buf_num_pages; + /* + * This field represents the number of pages used for request + * buffer(s). + */ + uint16_t req_buf_page_size; + /* This field represents the page size used for request buffer(s). */ + /* 16 bytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_16B UINT32_C(0x4) + /* 4 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K UINT32_C(0xc) + /* 8 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_8K UINT32_C(0xd) + /* 64 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_64K UINT32_C(0x10) + /* 2 Mbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_2M UINT32_C(0x15) + /* 4 Mbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4M UINT32_C(0x16) + /* 1 Gbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G UINT32_C(0x1e) + uint16_t req_buf_len; + /* The length of the request buffer per VF in bytes. */ + uint16_t resp_buf_len; + /* The length of the response buffer in bytes. */ + uint8_t unused_0; + uint8_t unused_1; + uint64_t req_buf_page_addr[10]; + /* This field represents the page address of the request buffer. */ + uint64_t error_buf_addr; + /* + * This field is used to receive the error reporting from the + * chipset. Only applicable for PFs. + */ + uint64_t resp_buf_addr; + /* This field is used to receive the response forwarded by the HWRM. */ +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_func_buf_rgtr_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_func_buf_unrgtr */ +/* + * Description: This command is used by the PF driver to unregister buffers used + * in the PF-VF communication with the HWRM. The PF driver uses this command to + * unregister buffers for PF-VF communication. A parent PF may issue this + * command to unregister buffers for communication between the PF and a specific + * VF. If the VF ID is not valid, then this command is used to unregister + * buffers used for communications with all children VFs of the PF. + */ +/* Input (24 bytes) */ +struct hwrm_func_buf_unrgtr_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on.
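The req_buf_page_size codes above are log2 of the page size: 0x4 = 16 B, 0xc = 4 KiB, 0x15 = 2 MiB, 0x1e = 1 GiB. For any of the listed power-of-two sizes the code can therefore be computed instead of table-matched; a sketch using a GCC builtin:

    static uint16_t buf_rgtr_page_size_code(uint64_t page_size)
    {
        /* valid only for the power-of-two sizes enumerated above */
        return (uint16_t)__builtin_ctzll(page_size);
    }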
+	 * If the value is -1, then no CR completion will be generated.
+	 * Any other value must be a valid CR ring_id value for this
+	 * function.
+	 */
+	uint16_t seq_id;
+	/* This value indicates the command sequence number. */
+	uint16_t target_id;
+	/*
+	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+	 * - HWRM
+	 */
+	uint64_t resp_addr;
+	/*
+	 * This is the host address where the response will be written
+	 * when the request is complete. This area must be 16B aligned
+	 * and must be cleared to zero before the request is made.
+	 */
+	uint32_t enables;
+	/* This bit must be '1' for the vf_id field to be configured. */
+	#define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID	UINT32_C(0x1)
+	uint16_t vf_id;
+	/*
+	 * This value is used to identify a Virtual Function (VF). The
+	 * scope of VF ID is local within a PF.
+	 */
+	uint16_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_unrgtr_output {
+	uint16_t error_code;
+	/*
+	 * Pass/Fail or error type Note: receiver to verify the in
+	 * parameters, and fail the call with an error when appropriate
+	 */
+	uint16_t req_type;
+	/* This field returns the type of original request. */
+	uint16_t seq_id;
+	/* This field provides original sequence number of the command. */
+	uint16_t resp_len;
+	/*
+	 * This field is the length of the response in bytes. The last
+	 * byte of the response is a valid flag that will read as '1'
+	 * when the command has been completely written to memory.
+	 */
+	uint32_t unused_0;
+	uint8_t unused_1;
+	uint8_t unused_2;
+	uint8_t unused_3;
+	uint8_t valid;
+	/*
+	 * This field is used in Output records to indicate that the
+	 * output is completely written to RAM. This field should be
+	 * read as '1' to indicate that the output has been completely
+	 * written. When writing a command completion or response to an
+	 * internal processor, the order of writes has to be such that
+	 * this field is written last.
+	 */
+} __attribute__((packed));
+
+/* hwrm_func_vf_cfg */
+/*
+ * Description: This command allows configuration of a VF by its driver. If
+ * this function is called by a PF driver, then the HWRM shall fail this
+ * command. If guest VLAN and/or MAC address are provided in this command,
+ * then the HWRM shall set up appropriate MAC/VLAN filters for the VF that
+ * is being configured. A VF driver should set VF MTU/MRU using this command
+ * prior to allocating RX VNICs or TX rings for the corresponding VF.
+ */
+/* Input (32 bytes) */
+
+struct hwrm_func_vf_cfg_input {
+	uint16_t req_type;
+	/*
+	 * This value indicates what type of request this is. The format
+	 * for the rest of the command is determined by this field.
+	 */
+	uint16_t cmpl_ring;
+	/*
+	 * This value indicates what completion ring the request will be
+	 * optionally completed on. If the value is -1, then no CR
+	 * completion will be generated. Any other value must be a valid
+	 * CR ring_id value for this function.
+	 */
+	uint16_t seq_id;
+	/* This value indicates the command sequence number. */
+	uint16_t target_id;
+	/*
+	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+	 * - HWRM
+	 */
+	uint64_t resp_addr;
+	/*
+	 * This is the host address where the response will be written
+	 * when the request is complete. This area must be 16B aligned
+	 * and must be cleared to zero before the request is made.
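	 *
	 * A minimal sketch of satisfying these requirements with DPDK
	 * memzone helpers (the "hwrm_resp" name and 4K length here are
	 * illustrative choices, not mandated by the interface):
	 *
	 *	const struct rte_memzone *mz =
	 *		rte_memzone_reserve_aligned("hwrm_resp", 4096,
	 *					    SOCKET_ID_ANY, 0, 16);
	 *	memset(mz->addr, 0, 4096);
	 *	req.resp_addr = rte_cpu_to_le_64(mz->phys_addr);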
+	 */
+	uint32_t enables;
+	/* This bit must be '1' for the mtu field to be configured. */
+	#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU	UINT32_C(0x1)
+	/* This bit must be '1' for the guest_vlan field to be configured. */
+	#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN	UINT32_C(0x2)
+	/*
+	 * This bit must be '1' for the async_event_cr field to be configured.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR	UINT32_C(0x4)
+	/* This bit must be '1' for the dflt_mac_addr field to be configured. */
+	#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR	UINT32_C(0x8)
+	uint16_t mtu;
+	/*
+	 * The maximum transmission unit requested on the function. The HWRM
+	 * should make sure that the mtu of the function does not exceed the
+	 * mtu of the physical port that this function is associated with. In
+	 * addition to requesting mtu per function, it is possible to
+	 * configure mtu per transmit ring. By default, the mtu of each
+	 * transmit ring associated with a function is equal to the mtu of
+	 * the function. The HWRM should make sure that the mtu of each
+	 * transmit ring that is assigned to a function has a valid mtu.
+	 */
+	uint16_t guest_vlan;
+	/*
+	 * The guest VLAN for the function being configured. This field's
+	 * format is the same as the 802.1Q Tag's Tag Control Information
+	 * (TCI) format, which includes both Priority Code Point (PCP) and
+	 * VLAN Identifier (VID).
+	 */
+	uint16_t async_event_cr;
+	/*
+	 * ID of the target completion ring for receiving asynchronous event
+	 * completions. If this field is not valid, then the HWRM shall use
+	 * the default completion ring of the function that is being
+	 * configured as the target completion ring for providing any
+	 * asynchronous event completions for that function. If this field is
+	 * valid, then the HWRM shall use the completion ring identified by
+	 * this ID as the target completion ring for providing any
+	 * asynchronous event completions for the function that is being
+	 * configured.
+	 */
+	uint8_t dflt_mac_addr[6];
+	/*
+	 * This value is the current MAC address requested by the VF driver
+	 * to be configured on this VF. A value of 00-00-00-00-00-00 indicates
+	 * no MAC address configuration is requested by the VF driver. The
+	 * parent PF driver may reject or overwrite this MAC address.
+	 */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+
+struct hwrm_func_vf_cfg_output {
+	uint16_t error_code;
+	/*
+	 * Pass/Fail or error type Note: receiver to verify the in
+	 * parameters, and fail the call with an error when appropriate
+	 */
+	uint16_t req_type;
+	/* This field returns the type of original request. */
+	uint16_t seq_id;
+	/* This field provides original sequence number of the command. */
+	uint16_t resp_len;
+	/*
+	 * This field is the length of the response in bytes. The last
+	 * byte of the response is a valid flag that will read as '1'
+	 * when the command has been completely written to memory.
+	 */
+	uint32_t unused_0;
+	uint8_t unused_1;
+	uint8_t unused_2;
+	uint8_t unused_3;
+	uint8_t valid;
+	/*
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1' to
+	 * indicate that the output has been completely written. When writing
+	 * a command completion or response to an internal processor, the
+	 * order of writes has to be such that this field is written last.
+	 */
+} __attribute__((packed));
+
 /* hwrm_port_phy_cfg */
 /*
  * Description: This command configures the PHY device for the port.
It allows @@ -2500,7 +4362,7 @@ struct hwrm_func_drv_unrgtr_output { * configure PHY using this command. In a network partition mode, a PF driver * shall not be allowed to configure PHY using this command. */ -/* Input (56 bytes) */ +/* Input (56 bytes) */ struct hwrm_port_phy_cfg_input { uint16_t req_type; /* @@ -2540,19 +4402,8 @@ struct hwrm_port_phy_cfg_input { * PHY configuration and settings specified in this command. */ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1) - /* - * When this bit is set to '1', the link shall be forced to be - * taken down. # When this bit is set to '1", all other command - * input settings related to the link speed shall be ignored. - * Once the link state is forced down, it can be explicitly - * cleared from that state by setting this flag to '0'. # If - * this flag is set to '0', then the link shall be cleared from - * forced down state if the link is in forced down state. There - * may be conditions (e.g. out-of-band or sideband configuration - * changes for the link) outside the scope of the HWRM - * implementation that may clear forced down link state. - */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN UINT32_C(0x2) + /* deprecated bit. Do not use!!! */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED UINT32_C(0x2) /* * When this bit is set to '1', the link shall be forced to the * force_link_speed value. When this bit is set to '1', the HWRM @@ -2570,14 +4421,14 @@ struct hwrm_port_phy_cfg_input { */ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8) /* - * When this bit is set to '1', Energy Efficient Ethernet (EEE) + * When this bit is set to '1', Energy Efficient Ethernet (EEE) * is requested to be enabled on this link. If EEE is not * supported on this port, then this flag shall be ignored by * the HWRM. */ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10) /* - * When this bit is set to '1', Energy Efficient Ethernet (EEE) + * When this bit is set to '1', Energy Efficient Ethernet (EEE) * is requested to be disabled on this link. If EEE is not * supported on this port, then this flag shall be ignored by * the HWRM. @@ -2598,49 +4449,67 @@ struct hwrm_port_phy_cfg_input { * ignored by the HWRM. If EEE is disabled on this port, then * this flag shall be ignored by the HWRM. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80) + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80) /* * When set to 1, then the HWRM shall enable FEC * autonegotitation on this port if supported. When set to 0, * then this flag shall be ignored. If FEC autonegotiation is * not supported, then the HWRM shall ignore this flag. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100) + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100) /* * When set to 1, then the HWRM shall disable FEC * autonegotiation on this port if supported. When set to 0, * then this flag shall be ignored. If FEC autonegotiation is * not supported, then the HWRM shall ignore this flag. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE UINT32_C(0x200) + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \ + UINT32_C(0x200) /* - * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire + * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire * Code) on this port if supported. When set to 0, then this * flag shall be ignored. If FEC CLAUSE 74 is not supported, * then the HWRM shall ignore this flag. 
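 *
 * As a sketch, requesting Fire Code FEC could look like the following
 * (bnxt_hwrm_send() stands in for the driver's own request/response
 * plumbing and is a hypothetical helper):
 *
 *	struct hwrm_port_phy_cfg_input req = { 0 };
 *
 *	req.port_id = rte_cpu_to_le_16(port_id);
 *	req.flags = rte_cpu_to_le_32(
 *		HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE);
 *	rc = bnxt_hwrm_send(bp, &req, sizeof(req));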
*/ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE UINT32_C(0x400) + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \ + UINT32_C(0x400) /* * When set to 1, then the HWRM shall disable FEC CLAUSE 74 - * (Fire Code) on this port if supported. When set to 0, then + * (Fire Code) on this port if supported. When set to 0, then * this flag shall be ignored. If FEC CLAUSE 74 is not * supported, then the HWRM shall ignore this flag. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE UINT32_C(0x800) + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \ + UINT32_C(0x800) /* - * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed + * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed * Solomon) on this port if supported. When set to 0, then this * flag shall be ignored. If FEC CLAUSE 91 is not supported, * then the HWRM shall ignore this flag. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE UINT32_C(0x1000) + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \ + UINT32_C(0x1000) /* * When set to 1, then the HWRM shall disable FEC CLAUSE 91 - * (Reed Solomon) on this port if supported. When set to 0, then + * (Reed Solomon) on this port if supported. When set to 0, then * this flag shall be ignored. If FEC CLAUSE 91 is not * supported, then the HWRM shall ignore this flag. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE UINT32_C(0x2000) + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \ + UINT32_C(0x2000) + /* + * When this bit is set to '1', the link shall be forced to be + * taken down. # When this bit is set to '1", all other command + * input settings related to the link speed shall be ignored. + * Once the link state is forced down, it can be explicitly + * cleared from that state by setting this flag to '0'. # If + * this flag is set to '0', then the link shall be cleared from + * forced down state if the link is in forced down state. There + * may be conditions (e.g. out-of-band or sideband configuration + * changes for the link) outside the scope of the HWRM + * implementation that may clear forced down link state. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN UINT32_C(0x4000) uint32_t enables; /* This bit must be '1' for the auto_mode field to be configured. */ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1) @@ -2657,7 +4526,8 @@ struct hwrm_port_phy_cfg_input { * This bit must be '1' for the auto_link_speed_mask field to be * configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK UINT32_C(0x10) + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \ + UINT32_C(0x10) /* This bit must be '1' for the wirespeed field to be configured. */ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIOUTPUTEED UINT32_C(0x20) /* This bit must be '1' for the lpbk field to be configured. */ @@ -2670,7 +4540,8 @@ struct hwrm_port_phy_cfg_input { * This bit must be '1' for the eee_link_speed_mask field to be * configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK UINT32_C(0x200) + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \ + UINT32_C(0x200) /* This bit must be '1' for the tx_lpi_timer field to be configured. */ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400) uint16_t port_id; @@ -2772,7 +4643,7 @@ struct hwrm_port_phy_cfg_input { * set to 1, auto_pause bits should be ignored and should be set * to 0. 
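 *
 * A sketch of requesting autonegotiated flow control (this assumes an
 * ENABLES_AUTO_PAUSE enable bit, which is not shown in this excerpt,
 * plus the usual send plumbing):
 *
 *	req.enables = rte_cpu_to_le_32(
 *		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
 *	req.auto_pause =
 *		HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE;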
*/ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4) uint8_t unused_0; uint16_t auto_link_speed; /* @@ -2808,34 +4679,46 @@ struct hwrm_port_phy_cfg_input { * autoneg_mode is "mask". If unsupported speed is enabled an * error will be generated. */ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8) + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) /* 2Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB UINT32_C(0x10) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \ + UINT32_C(0x10) /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB UINT32_C(0x20) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + UINT32_C(0x20) /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40) /* 20Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80) /* 25Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB UINT32_C(0x100) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \ + UINT32_C(0x100) /* 40Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB UINT32_C(0x200) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \ + UINT32_C(0x200) /* 50Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB UINT32_C(0x400) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \ + UINT32_C(0x400) /* 100Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB UINT32_C(0x2000) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \ + UINT32_C(0x2000) uint8_t wirespeed; /* This value controls the wirespeed feature. */ /* Wirespeed feature is disabled. */ @@ -2878,7 +4761,7 @@ struct hwrm_port_phy_cfg_input { uint32_t preemphasis; /* * This value controls the pre-emphasis to be used for the link. 
- * Driver should not set this value (use enable.preemphasis = 0) + * Driver should not set this value (use enable.preemphasis = 0) * unless driver is sure of setting. Normally HWRM FW will * determine proper pre-emphasis. */ @@ -2892,19 +4775,19 @@ struct hwrm_port_phy_cfg_input { * speed shall be provided in this mask. */ /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2) + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2) /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8) /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10) + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10) /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20) + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20) /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40) + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40) uint8_t unused_2; uint8_t unused_3; uint32_t tx_lpi_timer; @@ -2913,11 +4796,11 @@ struct hwrm_port_phy_cfg_input { * Reuested setting of TX LPI timer in microseconds. This field * is valid only when EEE is enabled and TX LPI is enabled. */ - #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) + #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0 } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_port_phy_cfg_output { uint16_t error_code; /* @@ -2951,7 +4834,7 @@ struct hwrm_port_phy_cfg_output { /* hwrm_port_phy_qcfg */ /* Description: This command queries the PHY configuration for the port. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_port_phy_qcfg_input { uint16_t req_type; /* @@ -2984,7 +4867,7 @@ struct hwrm_port_phy_qcfg_input { uint16_t unused_0[3]; } __attribute__((packed)); -/* Output (96 bytes) */ +/* Output (96 bytes) */ struct hwrm_port_phy_qcfg_output { uint16_t error_code; /* @@ -3062,50 +4945,47 @@ struct hwrm_port_phy_qcfg_output { * each speed that is supported, the corrresponding bit will be * set to '1'. 
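 *
 * For example, a driver can probe for a given speed with a simple
 * bitmask test (resp is assumed to point at a completed
 * hwrm_port_phy_qcfg_output):
 *
 *	uint16_t speeds = rte_le_to_cpu_16(resp->support_speeds);
 *	int can_do_25g =
 *		!!(speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB);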
*/ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8) /* 2Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10) /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20) + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20) /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40) + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40) /* 20Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80) + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80) /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100) + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100) /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200) + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200) /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400) + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400) /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000) uint16_t force_link_speed; /* * Current setting of forced link speed. When the link speed is * not being forced, this value shall be set to 0. 
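 *
 * The encoding is in units of 100 Mbps (0xa = 1 Gbps, 0x64 = 10 Gbps,
 * and so on), with 0xffff special-cased for 10 Mbps, so a decode
 * sketch (the helper name here is ours, not part of the interface):
 *
 *	static uint32_t bnxt_fw_speed_to_mbps(uint16_t fw_speed)
 *	{
 *		if (fw_speed == 0xffff)	/* special-cased 10Mb value */
 *			return 10;
 *		return (uint32_t)fw_speed * 100;
 *	}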
*/ /* 100Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB \ - UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1) /* 1Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa) /* 2Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14) /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB \ - UINT32_C(0x19) + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19) /* 10Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64) /* 20Mb link speed */ @@ -3113,14 +4993,13 @@ struct hwrm_port_phy_qcfg_output { /* 25Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa) /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190) + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190) /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4) + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4) /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \ - UINT32_C(0x3e8) + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8) /* 10Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff) uint8_t auto_mode; /* Current setting of auto negotiation mode. */ /* @@ -3142,8 +5021,7 @@ struct hwrm_port_phy_qcfg_output { * DEPRECATED. An HWRM client should not use * this mode. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW \ - UINT32_C(0x3) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3) /* * Select the speeds based on the corresponding * link speed mask value that is provided. @@ -3177,8 +5055,7 @@ struct hwrm_port_phy_qcfg_output { * set to 1, auto_pause bits should be ignored and should be set * to 0. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \ - UINT32_C(0x4) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4) uint16_t auto_link_speed; /* * Current setting for auto_link_speed. This field is only valid @@ -3203,9 +5080,9 @@ struct hwrm_port_phy_qcfg_output { /* 50Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) /* 10Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff) uint16_t auto_link_speed_mask; /* * Current setting for auto_link_speed_mask that is used to @@ -3214,23 +5091,22 @@ struct hwrm_port_phy_qcfg_output { * in this field shall be a subset of supported speeds on this * port. 
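 *
 * The corresponding input mask in hwrm_port_phy_cfg uses the same bit
 * layout, so (as a sketch) advertising only 10Gb and 25Gb would be
 * requested with:
 *
 *	cfg.enables = rte_cpu_to_le_32(
 *		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK);
 *	cfg.auto_link_speed_mask = rte_cpu_to_le_16(
 *		HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB |
 *		HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB);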
*/ - /* 100Mb link speed (Half-duplex) */ + /* 100Mb link speed (Half-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \ UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \ + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \ UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \ UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \ - UINT32_C(0x8) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8) /* 2Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \ UINT32_C(0x10) /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \ UINT32_C(0x20) /* 10Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \ @@ -3248,12 +5124,12 @@ struct hwrm_port_phy_qcfg_output { #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \ UINT32_C(0x400) /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \ UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ + /* 10Mb link speed (Half-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \ UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ + /* 10Mb link speed (Full-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \ UINT32_C(0x2000) uint8_t wirespeed; @@ -3302,18 +5178,16 @@ struct hwrm_port_phy_qcfg_output { /* Module is inserted and accepted */ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE UINT32_C(0x0) /* Module is rejected and transmit side Laser is disabled. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \ - UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX UINT32_C(0x1) /* Module mismatch warning. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \ - UINT32_C(0x2) + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG UINT32_C(0x2) /* Module is rejected and powered down. */ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN UINT32_C(0x3) /* Module is not inserted. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \ UINT32_C(0x4) /* Module status is not applicable. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \ UINT32_C(0xff) uint32_t preemphasis; /* Current setting for preemphasis. 
*/ @@ -3329,13 +5203,13 @@ struct hwrm_port_phy_qcfg_output { #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN UINT32_C(0x0) /* BASE-CR */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR UINT32_C(0x1) - /* BASE-KR4 (Deprecated) */ + /* BASE-KR4 (Deprecated) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 UINT32_C(0x2) /* BASE-LR */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR UINT32_C(0x3) /* BASE-SR */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR UINT32_C(0x4) - /* BASE-KR2 (Deprecated) */ + /* BASE-KR2 (Deprecated) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 UINT32_C(0x5) /* BASE-KX */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX UINT32_C(0x6) @@ -3347,6 +5221,35 @@ struct hwrm_port_phy_qcfg_output { #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE UINT32_C(0x9) /* SGMII connected external PHY */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY UINT32_C(0xa) + /* 25G_BASECR_CA_L */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L UINT32_C(0xb) + /* 25G_BASECR_CA_S */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S UINT32_C(0xc) + /* 25G_BASECR_CA_N */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N UINT32_C(0xd) + /* 25G_BASESR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR UINT32_C(0xe) + /* 100G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 UINT32_C(0xf) + /* 100G_BASESR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 UINT32_C(0x10) + /* 100G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 UINT32_C(0x11) + /* 100G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 UINT32_C(0x12) + /* 100G_BASESR10 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 UINT32_C(0x13) + /* 40G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 UINT32_C(0x14) + /* 40G_BASESR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 UINT32_C(0x15) + /* 40G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 UINT32_C(0x16) + /* 40G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 UINT32_C(0x17) + /* 40G_ACTIVE_CABLE */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \ + UINT32_C(0x18) uint8_t media_type; /* This value represents a media type. */ /* Unknown */ @@ -3360,35 +5263,34 @@ struct hwrm_port_phy_qcfg_output { uint8_t xcvr_pkg_type; /* This value represents a transceiver type. */ /* PHY and MAC are in the same package */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \ UINT32_C(0x1) /* PHY and MAC are in different packages */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \ UINT32_C(0x2) uint8_t eee_config_phy_addr; /* * This field represents flags related to EEE configuration. * These EEE configuration flags are valid only when the - * auto_mode is not set to none (in other words autonegotiation + * auto_mode is not set to none (in other words autonegotiation * is enabled). */ /* This field represents PHY address. */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f) #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0 /* - * When set to 1, Energy Efficient Ethernet (EEE) mode is + * When set to 1, Energy Efficient Ethernet (EEE) mode is * enabled. Speeds for autoneg with EEE mode enabled are based * on eee_link_speed_mask. 
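 *
 * Since the flags share this byte with the PHY address, a sketch of
 * unpacking it (resp assumed to point at a completed response):
 *
 *	uint8_t v = resp->eee_config_phy_addr;
 *	uint8_t phy_addr = v & HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;
 *	int eee_enabled =
 *		!!(v & HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED);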
*/ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \ - UINT32_C(0x20) + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED UINT32_C(0x20) /* * This flag is valid only when eee_enabled is set to 1. # If * eee_enabled is set to 0, then EEE mode is disabled and this * flag shall be ignored. # If eee_enabled is set to 1 and this - * flag is set to 1, then Energy Efficient Ethernet (EEE) mode + * flag is set to 1, then Energy Efficient Ethernet (EEE) mode * is enabled and in use. # If eee_enabled is set to 1 and this - * flag is set to 0, then Energy Efficient Ethernet (EEE) mode + * flag is set to 0, then Energy Efficient Ethernet (EEE) mode * is enabled but is currently not in use. */ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE UINT32_C(0x40) @@ -3396,16 +5298,16 @@ struct hwrm_port_phy_qcfg_output { * This flag is valid only when eee_enabled is set to 1. # If * eee_enabled is set to 0, then EEE mode is disabled and this * flag shall be ignored. # If eee_enabled is set to 1 and this - * flag is set to 1, then Energy Efficient Ethernet (EEE) mode + * flag is set to 1, then Energy Efficient Ethernet (EEE) mode * is enabled and TX LPI is enabled. # If eee_enabled is set to * 1 and this flag is set to 0, then Energy Efficient Ethernet - * (EEE) mode is enabled but TX LPI is disabled. + * (EEE) mode is enabled but TX LPI is disabled. */ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI UINT32_C(0x80) /* * This field represents flags related to EEE configuration. * These EEE configuration flags are valid only when the - * auto_mode is not set to none (in other words autonegotiation + * auto_mode is not set to none (in other words autonegotiation * is enabled). */ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0) @@ -3427,16 +5329,16 @@ struct hwrm_port_phy_qcfg_output { * The advertised speeds for the port by the link partner. Each * advertised speed will be set to '1'. */ - /* 100Mb link speed (Half-duplex) */ + /* 100Mb link speed (Half-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \ UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ + /* 100Mb link speed (Full-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \ UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ + /* 1Gb link speed (Half-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \ UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ + /* 1Gb link speed (Full-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \ UINT32_C(0x8) /* 2Gb link speed */ @@ -3463,10 +5365,10 @@ struct hwrm_port_phy_qcfg_output { /* 100Gb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \ UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ + /* 10Mb link speed (Half-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \ UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ + /* 10Mb link speed (Full-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \ UINT32_C(0x2000) uint8_t link_partner_adv_auto_mode; @@ -3483,13 +5385,14 @@ struct hwrm_port_phy_qcfg_output { /* Select all possible speeds for autoneg mode. */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \ - UINT32_C(0x1) + UINT32_C(0x1) /* * Select only the auto_link_speed speed for * autoneg mode. This mode has been DEPRECATED. * An HWRM client should not use this mode. 
*/ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \ UINT32_C(0x2) /* * Select the auto_link_speed or any speed below @@ -3499,14 +5402,14 @@ struct hwrm_port_phy_qcfg_output { */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \ - UINT32_C(0x3) + UINT32_C(0x3) /* * Select the speeds based on the corresponding * link speed mask value that is provided. */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \ - UINT32_C(0x4) + UINT32_C(0x4) uint8_t link_partner_adv_pause; /* The advertised pause settings on the port by the link partner. */ /* @@ -3532,13 +5435,13 @@ struct hwrm_port_phy_qcfg_output { /* Reserved */ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ + /* 100Mb link speed (Full-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \ UINT32_C(0x2) /* Reserved */ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ + /* 1Gb link speed (Full-duplex) */ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \ UINT32_C(0x8) /* Reserved */ @@ -3559,31 +5462,31 @@ struct hwrm_port_phy_qcfg_output { /* Reserved */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \ - UINT32_C(0x2) + UINT32_C(0x2) /* Reserved */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \ - UINT32_C(0x8) + UINT32_C(0x8) /* Reserved */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ - UINT32_C(0x10) + UINT32_C(0x10) /* Reserved */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ - UINT32_C(0x20) + UINT32_C(0x20) /* 10Gb link speed */ #define \ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \ - UINT32_C(0x40) + UINT32_C(0x40) uint32_t xcvr_identifier_type_tx_lpi_timer; /* This value represents transceiver identifier type. */ /* @@ -3591,32 +5494,31 @@ struct hwrm_port_phy_qcfg_output { * is valid only when_eee_enabled flag is set to 1 and * tx_lpi_enabled is set to 1. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \ - UINT32_C(0xffffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0 /* This value represents transceiver identifier type. 
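 *
 * Both values live in one 32-bit word, so a sketch of extracting them
 * with the masks and shift defined below:
 *
 *	uint32_t v = rte_le_to_cpu_32(
 *		resp->xcvr_identifier_type_tx_lpi_timer);
 *	uint32_t tx_lpi_timer =
 *		v & HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK;
 *	uint32_t xcvr_type =
 *		(v & HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK) >>
 *		HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT;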
*/ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \ UINT32_C(0xff000000) #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24 /* Unknown */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \ (UINT32_C(0x0) << 24) /* SFP/SFP+/SFP28 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \ (UINT32_C(0x3) << 24) /* QSFP */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \ (UINT32_C(0xc) << 24) /* QSFP+ */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \ (UINT32_C(0xd) << 24) /* QSFP28 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \ (UINT32_C(0x11) << 24) uint16_t fec_cfg; /* * This value represents the current configuration of Forward - * Error Correction (FEC) on the port. + * Error Correction (FEC) on the port. */ /* * When set to 1, then FEC is not supported on this port. If @@ -3627,7 +5529,7 @@ struct hwrm_port_phy_qcfg_output { * then the HWRM shall set this flag to 1 when reporting FEC * capability. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \ UINT32_C(0x1) /* * When set to 1, then FEC autonegotiation is supported on this @@ -3645,30 +5547,30 @@ struct hwrm_port_phy_qcfg_output { #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \ UINT32_C(0x4) /* - * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on - * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is + * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on + * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is * not supported on this port. */ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \ UINT32_C(0x8) /* - * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on - * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is + * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on + * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is * disabled if supported. This flag should be ignored if FEC * CLAUSE 74 is not supported on this port. */ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \ UINT32_C(0x10) /* - * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported - * on this port. When set to 0, then FEC CLAUSE 91 (Reed + * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported + * on this port. When set to 0, then FEC CLAUSE 91 (Reed * Solomon) is not supported on this port. */ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \ UINT32_C(0x20) /* - * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled - * on this port. When set to 0, then FEC CLAUSE 91 (Reed + * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled + * on this port. When set to 0, then FEC CLAUSE 91 (Reed * Solomon) is disabled if supported. This flag should be * ignored if FEC CLAUSE 91 is not supported on this port. */ @@ -3704,6 +5606,1132 @@ struct hwrm_port_phy_qcfg_output { */ } __attribute__((packed)); +/* hwrm_port_qstats */ +/* Description: This function returns per port Ethernet statistics. 
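 *
 * A sketch of issuing it (bnxt_hwrm_send() and the two DMA addresses
 * are placeholders for the driver's own plumbing; the statistics
 * buffers must be DMA-able host memory):
 *
 *	struct hwrm_port_qstats_input req = { 0 };
 *
 *	req.port_id = rte_cpu_to_le_16(port_id);
 *	req.tx_stat_host_addr = rte_cpu_to_le_64(tx_stats_dma);
 *	req.rx_stat_host_addr = rte_cpu_to_le_64(rx_stats_dma);
 *	rc = bnxt_hwrm_send(bp, &req, sizeof(req));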
*/ +/* Input (40 bytes) */ +struct hwrm_port_qstats_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t port_id; + /* Port ID of port that is being queried. */ + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2[3]; + uint8_t unused_3; + uint64_t tx_stat_host_addr; + /* This is the host address where Tx port statistics will be stored */ + uint64_t rx_stat_host_addr; + /* This is the host address where Rx port statistics will be stored */ +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_port_qstats_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint16_t tx_stat_size; + /* The size of TX port statistics block in bytes. */ + uint16_t rx_stat_size; + /* The size of RX port statistics block in bytes. */ + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_port_clr_stats */ +/* + * Description: This function clears per port statistics. The HWRM shall not + * allow a VF driver to clear port statistics. The HWRM shall not allow a PF + * driver to clear port statistics in a partitioning mode. The HWRM may allow a + * PF driver to clear port statistics in the non-partitioning mode. + */ +/* Input (24 bytes) */ +struct hwrm_port_clr_stats_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 
0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t port_id; + /* Port ID of port that is being queried. */ + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_port_clr_stats_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_port_led_cfg */ +/* + * Description: This function is used to configure LEDs on a given port. Each + * port has individual set of LEDs associated with it. These LEDs are used for + * speed/link configuration as well as activity indicator configuration. Up to + * three LEDs can be configured, one for activity and two for speeds. + */ +/* Input (64 bytes) */ +struct hwrm_port_led_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t enables; + /* This bit must be '1' for the led0_id field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID UINT32_C(0x1) + /* This bit must be '1' for the led0_state field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE UINT32_C(0x2) + /* This bit must be '1' for the led0_color field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR UINT32_C(0x4) + /* + * This bit must be '1' for the led0_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON UINT32_C(0x8) + /* + * This bit must be '1' for the led0_blink_off field to be + * configured. 
+ */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF UINT32_C(0x10) + /* + * This bit must be '1' for the led0_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID UINT32_C(0x20) + /* This bit must be '1' for the led1_id field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID UINT32_C(0x40) + /* This bit must be '1' for the led1_state field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE UINT32_C(0x80) + /* This bit must be '1' for the led1_color field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR UINT32_C(0x100) + /* + * This bit must be '1' for the led1_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON UINT32_C(0x200) + /* + * This bit must be '1' for the led1_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF UINT32_C(0x400) + /* + * This bit must be '1' for the led1_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID UINT32_C(0x800) + /* This bit must be '1' for the led2_id field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID UINT32_C(0x1000) + /* This bit must be '1' for the led2_state field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE UINT32_C(0x2000) + /* This bit must be '1' for the led2_color field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR UINT32_C(0x4000) + /* + * This bit must be '1' for the led2_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON UINT32_C(0x8000) + /* + * This bit must be '1' for the led2_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF UINT32_C(0x10000) + /* + * This bit must be '1' for the led2_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID UINT32_C(0x20000) + /* This bit must be '1' for the led3_id field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID UINT32_C(0x40000) + /* This bit must be '1' for the led3_state field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE UINT32_C(0x80000) + /* This bit must be '1' for the led3_color field to be configured. */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR UINT32_C(0x100000) + /* + * This bit must be '1' for the led3_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON UINT32_C(0x200000) + /* + * This bit must be '1' for the led3_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \ + UINT32_C(0x400000) + /* + * This bit must be '1' for the led3_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID UINT32_C(0x800000) + uint16_t port_id; + /* Port ID of port whose LEDs are configured. */ + uint8_t num_leds; + /* + * The number of LEDs that are being configured. Up to 4 LEDs + * can be configured with this command. + */ + uint8_t rsvd; + /* Reserved field. */ + uint8_t led0_id; + /* An identifier for the LED #0. */ + uint8_t led0_state; + /* The requested state of the LED #0. 
*/ + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4) + uint8_t led0_color; + /* The requested color of LED #0. */ + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) + uint8_t unused_0; + uint16_t led0_blink_on; + /* + * If the LED #0 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED on + * between cycles. + */ + uint16_t led0_blink_off; + /* + * If the LED #0 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED off + * between cycles. + */ + uint8_t led0_group_id; + /* + * An identifier for the group of LEDs that LED #0 belongs to. + * If set to 0, then the LED #0 shall not be grouped and shall + * be treated as an individual resource. For all other non-zero + * values of this field, LED #0 shall be grouped together with + * the LEDs with the same group ID value. + */ + uint8_t rsvd0; + /* Reserved field. */ + uint8_t led1_id; + /* An identifier for the LED #1. */ + uint8_t led1_state; + /* The requested state of the LED #1. */ + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4) + uint8_t led1_color; + /* The requested color of LED #1. */ + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) + uint8_t unused_1; + uint16_t led1_blink_on; + /* + * If the LED #1 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED on + * between cycles. + */ + uint16_t led1_blink_off; + /* + * If the LED #1 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED off + * between cycles. + */ + uint8_t led1_group_id; + /* + * An identifier for the group of LEDs that LED #1 belongs to. + * If set to 0, then the LED #1 shall not be grouped and shall + * be treated as an individual resource. For all other non-zero + * values of this field, LED #1 shall be grouped together with + * the LEDs with the same group ID value. + */ + uint8_t rsvd1; + /* Reserved field. */ + uint8_t led2_id; + /* An identifier for the LED #2. */ + uint8_t led2_state; + /* The requested state of the LED #2. 
*/ + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4) + uint8_t led2_color; + /* The requested color of LED #2. */ + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) + uint8_t unused_2; + uint16_t led2_blink_on; + /* + * If the LED #2 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED on + * between cycles. + */ + uint16_t led2_blink_off; + /* + * If the LED #2 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED off + * between cycles. + */ + uint8_t led2_group_id; + /* + * An identifier for the group of LEDs that LED #2 belongs to. + * If set to 0, then the LED #2 shall not be grouped and shall + * be treated as an individual resource. For all other non-zero + * values of this field, LED #2 shall be grouped together with + * the LEDs with the same group ID value. + */ + uint8_t rsvd2; + /* Reserved field. */ + uint8_t led3_id; + /* An identifier for the LED #3. */ + uint8_t led3_state; + /* The requested state of the LED #3. */ + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4) + uint8_t led3_color; + /* The requested color of LED #3. */ + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) + uint8_t unused_3; + uint16_t led3_blink_on; + /* + * If the LED #3 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED on + * between cycles. + */ + uint16_t led3_blink_off; + /* + * If the LED #3 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED off + * between cycles. + */ + uint8_t led3_group_id; + /* + * An identifier for the group of LEDs that LED #3 belongs to. + * If set to 0, then the LED #3 shall not be grouped and shall + * be treated as an individual resource. For all other non-zero + * values of this field, LED #3 shall be grouped together with + * the LEDs with the same group ID value. + */ + uint8_t rsvd3; + /* Reserved field. 
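 *
 * As a usage sketch, blinking LED #0 on a port (for example to
 * visually identify it) could be requested as follows, with the send
 * plumbing omitted:
 *
 *	struct hwrm_port_led_cfg_input req = { 0 };
 *
 *	req.port_id = rte_cpu_to_le_16(port_id);
 *	req.num_leds = 1;
 *	req.enables = rte_cpu_to_le_32(
 *		HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
 *		HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
 *		HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
 *		HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF);
 *	req.led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
 *	req.led0_blink_on = rte_cpu_to_le_16(500);	/* ms on */
 *	req.led0_blink_off = rte_cpu_to_le_16(500);	/* ms off */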
*/ +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_port_led_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_port_led_qcfg */ +/* + * Description: This function is used to query configuration of LEDs on a given + * port. Each port has individual set of LEDs associated with it. These LEDs are + * used for speed/link configuration as well as activity indicator + * configuration. Up to three LEDs can be configured, one for activity and two + * for speeds. + */ +/* Input (24 bytes) */ +struct hwrm_port_led_qcfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t port_id; + /* Port ID of port whose LED configuration is being queried. */ + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (56 bytes) */ +struct hwrm_port_led_qcfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint8_t num_leds; + /* + * The number of LEDs that are configured on this port. Up to 4 + * LEDs can be returned in the response. + */ + uint8_t led0_id; + /* An identifier for the LED #0. */ + uint8_t led0_type; + /* The type of LED #0. 
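The resp_addr rules stated in the input structures here (16B aligned, cleared to zero before the request) apply to every HWRM command in this file. A hedged sketch of preparing the hwrm_port_led_qcfg request follows; the IOVA translation and the transport are driver-specific assumptions, and the caller owns and frees the response area.

#include <errno.h>
#include <string.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>

static int
port_led_qcfg_prepare_sketch(struct hwrm_port_led_qcfg_input *req,
			     uint16_t port_id)
{
	struct hwrm_port_led_qcfg_output *resp;

	/* Response area must be 16B aligned and cleared to zero. */
	resp = rte_zmalloc("led_qcfg_resp", sizeof(*resp), 16);
	if (resp == NULL)
		return -ENOMEM;
	memset(req, 0, sizeof(*req));
	req->port_id = rte_cpu_to_le_16(port_id);
	/* req->resp_addr = <IOVA of resp>; address translation and the
	 * doorbell write are platform-specific and omitted here. */
	return 0;
}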
*/ + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) + uint8_t led0_state; + /* The current state of the LED #0. */ + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4) + uint8_t led0_color; + /* The color of LED #0. */ + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) + uint8_t unused_0; + uint16_t led0_blink_on; + /* + * If the LED #0 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED on + * between cycles. + */ + uint16_t led0_blink_off; + /* + * If the LED #0 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED off + * between cycles. + */ + uint8_t led0_group_id; + /* + * An identifier for the group of LEDs that LED #0 belongs to. + * If set to 0, then the LED #0 is not grouped. For all other + * non-zero values of this field, LED #0 is grouped together + * with the LEDs with the same group ID value. + */ + uint8_t led1_id; + /* An identifier for the LED #1. */ + uint8_t led1_type; + /* The type of LED #1. */ + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) + uint8_t led1_state; + /* The current state of the LED #1. */ + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4) + uint8_t led1_color; + /* The color of LED #1. */ + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) + uint8_t unused_1; + uint16_t led1_blink_on; + /* + * If the LED #1 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED on + * between cycles. + */ + uint16_t led1_blink_off; + /* + * If the LED #1 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED off + * between cycles. 
+ */ + uint8_t led1_group_id; + /* + * An identifier for the group of LEDs that LED #1 belongs to. + * If set to 0, then the LED #1 is not grouped. For all other + * non-zero values of this field, LED #1 is grouped together + * with the LEDs with the same group ID value. + */ + uint8_t led2_id; + /* An identifier for the LED #2. */ + uint8_t led2_type; + /* The type of LED #2. */ + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) + uint8_t led2_state; + /* The current state of the LED #2. */ + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4) + uint8_t led2_color; + /* The color of LED #2. */ + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) + uint8_t unused_2; + uint16_t led2_blink_on; + /* + * If the LED #2 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED on + * between cycles. + */ + uint16_t led2_blink_off; + /* + * If the LED #2 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED off + * between cycles. + */ + uint8_t led2_group_id; + /* + * An identifier for the group of LEDs that LED #2 belongs to. + * If set to 0, then the LED #2 is not grouped. For all other + * non-zero values of this field, LED #2 is grouped together + * with the LEDs with the same group ID value. + */ + uint8_t led3_id; + /* An identifier for the LED #3. */ + uint8_t led3_type; + /* The type of LED #3. */ + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) + uint8_t led3_state; + /* The current state of the LED #3. */ + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4) + uint8_t led3_color; + /* The color of LED #3. 
*/ + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) + uint8_t unused_3; + uint16_t led3_blink_on; + /* + * If the LED #3 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED on + * between cycles. + */ + uint16_t led3_blink_off; + /* + * If the LED #3 state is "blink" or "blinkalt", then this field + * represents the requested time in milliseconds to keep LED off + * between cycles. + */ + uint8_t led3_group_id; + /* + * An identifier for the group of LEDs that LED #3 belongs to. + * If set to 0, then the LED #3 is not grouped. For all other + * non-zero values of this field, LED #3 is grouped together + * with the LEDs with the same group ID value. + */ + uint8_t unused_4; + uint16_t unused_5; + uint8_t unused_6; + uint8_t unused_7; + uint8_t unused_8; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_port_led_qcaps */ +/* + * Description: This function is used to query capabilities of LEDs on a given + * port. Each port has individual set of LEDs associated with it. These LEDs are + * used for speed/link configuration as well as activity indicator + * configuration. + */ +/* Input (24 bytes) */ +struct hwrm_port_led_qcaps_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint16_t port_id; + /* Port ID of port whose LED configuration is being queried. */ + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (48 bytes) */ +struct hwrm_port_led_qcaps_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. 
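Every output structure in this file ends with the same 'valid' byte contract: the firmware writes that byte last, so software must poll it before trusting the rest of the response. A small sketch of that pattern, with the timeout bound as an assumed constant rather than anything defined in this header:

#include <errno.h>
#include <rte_cycles.h>
#include <rte_io.h>

#define HWRM_RESP_TIMEOUT_US 10000	/* assumed bound, not from this header */

static int
hwrm_wait_valid_sketch(volatile uint8_t *valid)
{
	unsigned int us;

	for (us = 0; us < HWRM_RESP_TIMEOUT_US; us++) {
		if (*valid == 1) {
			/* Read the payload only after seeing valid == 1. */
			rte_io_rmb();
			return 0;
		}
		rte_delay_us(1);
	}
	return -ETIMEDOUT;
}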
+ */ + uint8_t num_leds; + /* + * The number of LEDs that are configured on this port. Up to 4 + * LEDs can be returned in the response. + */ + uint8_t unused_0[3]; + /* Reserved for future use. */ + uint8_t led0_id; + /* An identifier for the LED #0. */ + uint8_t led0_type; + /* The type of LED #0. */ + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) + uint8_t led0_group_id; + /* + * An identifier for the group of LEDs that LED #0 belongs to. + * If set to 0, then the LED #0 cannot be grouped. For all other + * non-zero values of this field, LED #0 is grouped together + * with the LEDs with the same group ID value. + */ + uint8_t unused_1; + uint16_t led0_state_caps; + /* The states supported by LED #0. */ + /* + * If set to 1, this LED is enabled. If set to 0, this LED is + * disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. If set to 0, + * off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. If set to 0, + * on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. If set to + * 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. If set + * to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + uint16_t led0_color_caps; + /* The colors supported by LED #0. */ + /* reserved */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. If set to + * 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. If set to + * 0, Green color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + uint8_t led1_id; + /* An identifier for the LED #1. */ + uint8_t led1_type; + /* The type of LED #1. */ + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) + uint8_t led1_group_id; + /* + * An identifier for the group of LEDs that LED #1 belongs to. + * If set to 0, then the LED #1 cannot be grouped. For all other + * non-zero values of this field, LED #1 is grouped together + * with the LEDs with the same group ID value. + */ + uint8_t unused_2; + uint16_t led1_state_caps; + /* The states supported by LED #1. */ + /* + * If set to 1, this LED is enabled. If set to 0, this LED is + * disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. If set to 0, + * off state is not supported on this LED.
+ */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. If set to 0, + * on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. If set to + * 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. If set + * to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + uint16_t led1_color_caps; + /* The colors supported by LED #1. */ + /* reserved */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. If set to + * 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. If set to + * 0, Green color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + uint8_t led2_id; + /* An identifier for the LED #2. */ + uint8_t led2_type; + /* The type of LED #2. */ + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) + uint8_t led2_group_id; + /* + * An identifier for the group of LEDs that LED #2 belongs to. + * If set to 0, then the LED #2 cannot be grouped. For all other + * non-zero values of this field, LED #2 is grouped together + * with the LEDs with the same group ID value. + */ + uint8_t unused_3; + uint16_t led2_state_caps; + /* The states supported by LED #2. */ + /* + * If set to 1, this LED is enabled. If set to 0, this LED is + * disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. If set to 0, + * off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. If set to 0, + * on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. If set to + * 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. If set + * to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + uint16_t led2_color_caps; + /* The colors supported by LED #2. */ + /* reserved */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. If set to + * 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. If set to + * 0, Green color is not supported on this LED.
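The *_STATE_CAPS_* and *_COLOR_CAPS_* fields above are plain little-endian bitmasks, so gating a configuration request on the reported capabilities is a couple of tests. A sketch for LED #1, assuming a completed hwrm_port_led_qcaps response:

#include <stdbool.h>
#include <rte_byteorder.h>

static bool
led1_can_blink_amber(const struct hwrm_port_led_qcaps_output *resp)
{
	uint16_t state = rte_le_to_cpu_16(resp->led1_state_caps);
	uint16_t color = rte_le_to_cpu_16(resp->led1_color_caps);

	return (state & HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED) &&
	       (state &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED) &&
	       (color &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED);
}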
+ */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + uint8_t led3_id; + /* An identifier for the LED #3. */ + uint8_t led3_type; + /* The type of LED #3. */ + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) + uint8_t led3_group_id; + /* + * An identifier for the group of LEDs that LED #3 belongs to. + * If set to 0, then the LED #3 cannot be grouped. For all other + * non-zero values of this field, LED #3 is grouped together + * with the LEDs with the same group ID value. + */ + uint8_t unused_4; + uint16_t led3_state_caps; + /* The states supported by LED #3. */ + /* + * If set to 1, this LED is enabled. If set to 0, this LED is + * disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. If set to 0, + * off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. If set to 0, + * on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. If set to + * 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. If set + * to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + uint16_t led3_color_caps; + /* The colors supported by LED #3. */ + /* reserved */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. If set to + * 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. If set to + * 0, Green color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + uint8_t unused_5; + uint8_t unused_6; + uint8_t unused_7; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + /* hwrm_queue_qportcfg */ /* * Description: This function is called by a driver to query queue configuration @@ -3715,7 +6743,7 @@ struct hwrm_port_phy_qcfg_output { * then the driver shall only use queues for which service profiles are pre- * configured.
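For hwrm_queue_qportcfg, the only knobs are the TX/RX path flag and the port ID; the path defines appear in the input structure just below. A hedged sketch of an RX-path query, with the request header and transport assumed to come from a common driver helper:

#include <string.h>
#include <rte_byteorder.h>

static void
queue_qportcfg_rx_sketch(struct hwrm_queue_qportcfg_input *req,
			 uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = rte_cpu_to_le_32(HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
	req->port_id = rte_cpu_to_le_16(port_id);
	/* Header fields and the doorbell write are driver-specific. */
}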
*/ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { uint16_t req_type; /* @@ -3754,7 +6782,7 @@ struct hwrm_queue_qportcfg_input { #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) /* rx path */ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \ QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX uint16_t port_id; /* @@ -3764,7 +6792,7 @@ struct hwrm_queue_qportcfg_input { uint16_t unused_0; } __attribute__((packed)); -/* Output (32 bytes) */ +/* Output (32 bytes) */ struct hwrm_queue_qportcfg_output { uint16_t error_code; /* @@ -3813,18 +6841,18 @@ struct hwrm_queue_qportcfg_output { * TX side is the same as the corresponding queue configuration * on the RX side. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \ - UINT32_C(0x1) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG UINT32_C(0x1) uint8_t queue_pfcenable_cfg_allowed; /* * Bitmask indicating which queues can be configured by the * hwrm_queue_pfcenable_cfg command. Each bit represents a - * specific queue where bit 0 represents queue 0 and bit 7 - * represents queue 7. # A value of 0 indicates that the queue - * is not configurable by the hwrm_queue_pfcenable_cfg command. - * # A value of 1 indicates that the queue is configurable. # A - * hwrm_queue_pfcenable_cfg command shall return error when - * trying to configure a queue that is not configurable. + * specific priority where bit 0 represents priority 0 and bit 7 + * represents priority 7. # A value of 0 indicates that the + * priority is not configurable by the hwrm_queue_pfcenable_cfg + * command. # A value of 1 indicates that the priority is + * configurable. # A hwrm_queue_pfcenable_cfg command shall + * return error when trying to configure a priority that is not + * configurable. */ uint8_t queue_pri2cos_cfg_allowed; /* @@ -3860,14 +6888,14 @@ struct hwrm_queue_qportcfg_output { */ uint8_t queue_id0_service_profile; /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ + /* Lossy (best-effort) */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \ UINT32_C(0x0) /* Lossless */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \ UINT32_C(0x1) /* - * Set to 0xFF... (All Fs) if there is no + * Set to 0xFF... (All Fs) if there is no * service profile specified */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \ @@ -3884,14 +6912,14 @@ struct hwrm_queue_qportcfg_output { */ uint8_t queue_id1_service_profile; /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ + /* Lossy (best-effort) */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \ UINT32_C(0x0) /* Lossless */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \ UINT32_C(0x1) /* - * Set to 0xFF... (All Fs) if there is no + * Set to 0xFF... (All Fs) if there is no * service profile specified */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \ @@ -3908,14 +6936,14 @@ struct hwrm_queue_qportcfg_output { */ uint8_t queue_id2_service_profile; /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ + /* Lossy (best-effort) */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \ UINT32_C(0x0) /* Lossless */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \ UINT32_C(0x1) /* - * Set to 0xFF... 
(All Fs) if there is no + * Set to 0xFF... (All Fs) if there is no * service profile specified */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \ @@ -3932,14 +6960,14 @@ struct hwrm_queue_qportcfg_output { */ uint8_t queue_id3_service_profile; /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ + /* Lossy (best-effort) */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \ UINT32_C(0x0) /* Lossless */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \ UINT32_C(0x1) /* - * Set to 0xFF... (All Fs) if there is no + * Set to 0xFF... (All Fs) if there is no * service profile specified */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \ @@ -3956,14 +6984,14 @@ struct hwrm_queue_qportcfg_output { */ uint8_t queue_id4_service_profile; /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ + /* Lossy (best-effort) */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \ UINT32_C(0x0) /* Lossless */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \ UINT32_C(0x1) /* - * Set to 0xFF... (All Fs) if there is no + * Set to 0xFF... (All Fs) if there is no * service profile specified */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \ @@ -3980,14 +7008,14 @@ struct hwrm_queue_qportcfg_output { */ uint8_t queue_id5_service_profile; /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ + /* Lossy (best-effort) */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \ UINT32_C(0x0) /* Lossless */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \ UINT32_C(0x1) /* - * Set to 0xFF... (All Fs) if there is no + * Set to 0xFF... (All Fs) if there is no * service profile specified */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \ @@ -4004,14 +7032,14 @@ struct hwrm_queue_qportcfg_output { */ uint8_t queue_id6_service_profile; /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ + /* Lossy (best-effort) */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \ UINT32_C(0x0) /* Lossless */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \ UINT32_C(0x1) /* - * Set to 0xFF... (All Fs) if there is no + * Set to 0xFF... (All Fs) if there is no * service profile specified */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \ @@ -4028,14 +7056,14 @@ struct hwrm_queue_qportcfg_output { */ uint8_t queue_id7_service_profile; /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ + /* Lossy (best-effort) */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \ UINT32_C(0x0) /* Lossless */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \ UINT32_C(0x1) /* - * Set to 0xFF... (All Fs) if there is no + * Set to 0xFF... (All Fs) if there is no * service profile specified */ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \ @@ -4068,7 +7096,7 @@ struct hwrm_queue_qportcfg_output { * enabled, then the internal VNIC to SVIF mapping data structures shall be * programmed at the time of VNIC allocation. 
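The queue_pfcenable_cfg_allowed description above is a per-priority bitmask (bit 0 = priority 0 through bit 7 = priority 7), so the configurability test reduces to a shift and mask:

#include <stdbool.h>

static bool
pfc_prio_configurable(const struct hwrm_queue_qportcfg_output *resp,
		      unsigned int prio)
{
	/* A set bit means hwrm_queue_pfcenable_cfg may touch this priority. */
	return prio < 8 && (resp->queue_pfcenable_cfg_allowed & (1u << prio));
}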
*/ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_vnic_alloc_input { uint16_t req_type; /* @@ -4105,7 +7133,7 @@ struct hwrm_vnic_alloc_input { uint32_t unused_0; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_vnic_alloc_output { uint16_t error_code; /* @@ -4144,7 +7172,7 @@ struct hwrm_vnic_alloc_output { * VNIC as well as the VNIC. Reset and release all resources associated with the * VNIC. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_vnic_free_input { uint16_t req_type; /* @@ -4177,7 +7205,7 @@ struct hwrm_vnic_free_input { uint32_t unused_0; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_vnic_free_output { uint16_t error_code; /* @@ -4211,7 +7239,7 @@ struct hwrm_vnic_free_output { /* hwrm_vnic_cfg */ /* Description: Configure the RX VNIC structure. */ -/* Input (40 bytes) */ +/* Input (40 bytes) */ struct hwrm_vnic_cfg_input { uint16_t req_type; /* @@ -4271,10 +7299,10 @@ struct hwrm_vnic_cfg_input { * roce_dual_vnic_mode flag is set to '1', then the HWRM client * shall not set this flag to '1'. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10) + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10) /* * When a VNIC uses one destination ring group for certain - * application (e.g. Receive Flow Steering) where exact match is + * application (e.g. Receive Flow Steering) where exact match is * used to direct packets to a VNIC with one destination ring * group only, there is no need to configure RSS indirection * table for that VNIC as only one destination ring group is @@ -4310,17 +7338,17 @@ struct hwrm_vnic_cfg_input { */ uint16_t rss_rule; /* - * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if * there is no RSS rule. */ uint16_t cos_rule; /* - * RSS ID for COS rule/table structure. 0xFF... (All Fs) if + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if * there is no COS rule. */ uint16_t lb_rule; /* - * RSS ID for load balancing rule/table structure. 0xFF... (All + * RSS ID for load balancing rule/table structure. 0xFF... (All * Fs) if there is no LB rule. */ uint16_t mru; @@ -4334,7 +7362,7 @@ struct hwrm_vnic_cfg_input { uint32_t unused_0; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_vnic_cfg_output { uint16_t error_code; /* @@ -4366,9 +7394,153 @@ struct hwrm_vnic_cfg_output { */ } __attribute__((packed)); +/* hwrm_vnic_qcfg */ +/* + * Description: Query the RX VNIC structure. This function can be used by a PF + * driver to query its own VNIC resource or VNIC resource of its child VF. This + * function can also be used by a VF driver to query its own VNIC resource. + */ +/* Input (32 bytes) */ +struct hwrm_vnic_qcfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 
0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t enables; + /* This bit must be '1' for the vf_id_valid field to be configured. */ + #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) + uint32_t vnic_id; + /* Logical vnic ID */ + uint16_t vf_id; + /* ID of Virtual Function whose VNIC resource is being queried. */ + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (32 bytes) */ +struct hwrm_vnic_qcfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint16_t dflt_ring_grp; + /* Default Completion ring for the VNIC. */ + uint16_t rss_rule; + /* + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if + * there is no RSS rule. + */ + uint16_t cos_rule; + /* + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if + * there is no COS rule. + */ + uint16_t lb_rule; + /* + * RSS ID for load balancing rule/table structure. 0xFF... (All + * Fs) if there is no LB rule. + */ + uint16_t mru; + /* The maximum receive unit of the vnic. */ + uint8_t unused_0; + uint8_t unused_1; + uint32_t flags; + /* + * When this bit is '1', the VNIC is the default VNIC for the + * function. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT UINT32_C(0x1) + /* + * When this bit is '1', the VNIC is configured to strip VLAN in + * the RX path. If set to '0', then VLAN stripping is disabled + * on this VNIC. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2) + /* + * When this bit is '1', the VNIC is configured to buffer + * receive packets in the hardware until the host posts new + * receive buffers. If set to '0', then bd_stall is disabled on + * this VNIC. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4) + /* + * When this bit is '1', the VNIC is configured to receive both + * RoCE and non-RoCE traffic. If set to '0', then this VNIC is + * not configured to operate in dual VNIC mode. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8) + /* + * When this flag is set to '1', the VNIC is configured to + * receive only RoCE traffic. When this flag is set to '0', the + * VNIC is not configured to receive only RoCE traffic. If + * roce_dual_vnic_mode flag and this flag both are set to '1', + * then it is an invalid configuration of the VNIC. The HWRM + * should not allow that type of mis-configuration by HWRM + * clients. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10) + /* + * When a VNIC uses one destination ring group for certain + * application (e.g. Receive Flow Steering) where exact match is + * used to direct packets to a VNIC with one destination ring + * group only, there is no need to configure RSS indirection + * table for that VNIC as only one destination ring group is + * used. 
When this bit is set to '1', then the VNIC is enabled + * in a mode where RSS is enabled in the VNIC using a RSS + * context for computing RSS hash but the RSS indirection table + * is not configured. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20) + uint32_t unused_2; + uint8_t unused_3; + uint8_t unused_4; + uint8_t unused_5; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + /* hwrm_vnic_rss_cfg */ /* Description: This function is used to enable RSS configuration. */ -/* Input (48 bytes) */ +/* Input (48 bytes) */ struct hwrm_vnic_rss_cfg_input { uint16_t req_type; /* @@ -4441,7 +7613,7 @@ struct hwrm_vnic_rss_cfg_input { uint16_t unused_1[3]; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_vnic_rss_cfg_output { uint16_t error_code; /* @@ -4473,9 +7645,295 @@ struct hwrm_vnic_rss_cfg_output { */ } __attribute__((packed)); +/* hwrm_vnic_plcmodes_cfg */ +/* + * Description: This function can be used to set placement mode configuration of + * the VNIC. + */ +/* Input (40 bytes) */ + +struct hwrm_vnic_plcmodes_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint32_t flags; + /* + * When this bit is '1', the VNIC shall be configured to use regular + * placement algorithm. By default, the regular placement algorithm + * shall be enabled on the VNIC. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \ + UINT32_C(0x1) + /* + * When this bit is '1', the VNIC shall be configured use the jumbo + * placement algorithm. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \ + UINT32_C(0x2) + /* + * When this bit is '1', the VNIC shall be configured to enable Header- + * Data split for IPv4 packets according to the following rules: # If + * the packet is identified as TCP/IPv4, then the packet is split at the + * beginning of the TCP payload. # If the packet is identified as + * UDP/IPv4, then the packet is split at the beginning of UDP payload. # + * If the packet is identified as non-TCP and non-UDP IPv4 packet, then + * the packet is split at the beginning of the upper layer protocol + * header carried in the IPv4 packet. 
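Putting the placement-mode description into practice: the sketch below enables jumbo placement plus IPv4/IPv6 header-data split on a VNIC, using the flag, enable, and threshold fields of hwrm_vnic_plcmodes_cfg_input shown around this point. The threshold values are illustrative only; per the field descriptions, hds_threshold must be a multiple of 4 bytes.

#include <string.h>
#include <rte_byteorder.h>

static void
vnic_plcmodes_sketch(struct hwrm_vnic_plcmodes_cfg_input *req,
		     uint32_t vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->vnic_id = rte_cpu_to_le_32(vnic_id);
	req->flags = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT |
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 |
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6);
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID |
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID);
	req->jumbo_thresh = rte_cpu_to_le_16(2048); /* jumbo cut-over, bytes */
	req->hds_threshold = rte_cpu_to_le_16(256); /* multiple of 4 bytes */
}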
+ */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 UINT32_C(0x4) + /* + * When this bit is '1', the VNIC shall be configured to enable Header- + * Data split for IPv6 packets according to the following rules: # If + * the packet is identified as TCP/IPv6, then the packet is split at the + * beginning of the TCP payload. # If the packet is identified as + * UDP/IPv6, then the packet is split at the beginning of UDP payload. # + * If the packet is identified as non-TCP and non-UDP IPv6 packet, then + * the packet is split at the beginning of the upper layer protocol + * header carried in the IPv6 packet. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 UINT32_C(0x8) + /* + * When this bit is '1', the VNIC shall be configured to enable Header- + * Data split for FCoE packets at the beginning of FC payload. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE UINT32_C(0x10) + /* + * When this bit is '1', the VNIC shall be configured to enable Header- + * Data split for RoCE packets at the beginning of RoCE payload (after + * BTH/GRH headers). + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE UINT32_C(0x20) + uint32_t enables; + /* + * This bit must be '1' for the jumbo_thresh_valid field to be + * configured. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the hds_offset_valid field to be configured. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the hds_threshold_valid field to be + * configured. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \ + UINT32_C(0x4) + uint32_t vnic_id; + /* Logical vnic ID */ + uint16_t jumbo_thresh; + /* + * When jumbo placement algorithm is enabled, this value is used to + * determine the threshold for jumbo placement. Packets with length + * larger than this value will be placed according to the jumbo + * placement algorithm. + */ + uint16_t hds_offset; + /* + * This value is used to determine the offset into packet buffer where + * the split data (payload) will be placed according to one of of HDS + * placement algorithm. The lengths of packet buffers provided for split + * data shall be larger than this value. + */ + uint16_t hds_threshold; + /* + * When one of the HDS placement algorithm is enabled, this value is + * used to determine the threshold for HDS placement. Packets with + * length larger than this value will be placed according to the HDS + * placement algorithm. This value shall be in multiple of 4 bytes. + */ + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ + +struct hwrm_vnic_plcmodes_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. 
This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_vnic_plcmodes_qcfg */ +/* + * Description: This function can be used to query placement mode configuration + * of the VNIC. + */ +/* Input (24 bytes) */ + +struct hwrm_vnic_plcmodes_qcfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint32_t vnic_id; + /* Logical vnic ID */ + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (24 bytes) */ + +struct hwrm_vnic_plcmodes_qcfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint32_t flags; + /* + * When this bit is '1', the VNIC is configured to use regular placement + * algorithm. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \ + UINT32_C(0x1) + /* + * When this bit is '1', the VNIC is configured to use the jumbo + * placement algorithm. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \ + UINT32_C(0x2) + /* + * When this bit is '1', the VNIC is configured to enable Header-Data + * split for IPv4 packets. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 UINT32_C(0x4) + /* + * When this bit is '1', the VNIC is configured to enable Header-Data + * split for IPv6 packets. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 UINT32_C(0x8) + /* + * When this bit is '1', the VNIC is configured to enable Header-Data + * split for FCoE packets. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE UINT32_C(0x10) + /* + * When this bit is '1', the VNIC is configured to enable Header-Data + * split for RoCE packets. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE UINT32_C(0x20) + /* + * When this bit is '1', the VNIC is configured to be the default VNIC + * of the requesting function. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC UINT32_C(0x40) + uint16_t jumbo_thresh; + /* + * When jumbo placement algorithm is enabled, this value is used to + * determine the threshold for jumbo placement. 
Packets with length + * larger than this value will be placed according to the jumbo + * placement algorithm. + */ + uint16_t hds_offset; + /* + * This value is used to determine the offset into packet buffer where + * the split data (payload) will be placed according to one of of HDS + * placement algorithm. The lengths of packet buffers provided for split + * data shall be larger than this value. + */ + uint16_t hds_threshold; + /* + * When one of the HDS placement algorithm is enabled, this value is + * used to determine the threshold for HDS placement. Packets with + * length larger than this value will be placed according to the HDS + * placement algorithm. This value shall be in multiple of 4 bytes. + */ + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t unused_4; + uint8_t valid; + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ +} __attribute__((packed)); + /* hwrm_vnic_rss_cos_lb_ctx_alloc */ /* Description: This function is used to allocate COS/Load Balance context. */ -/* Input (16 bytes) */ +/* Input (16 bytes) */ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { uint16_t req_type; /* @@ -4505,7 +7963,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { */ } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { uint16_t error_code; /* @@ -4542,7 +8000,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { /* hwrm_vnic_rss_cos_lb_ctx_free */ /* Description: This function can be used to free COS/Load Balance context. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_vnic_rss_cos_lb_ctx_free_input { uint16_t req_type; /* @@ -4575,7 +8033,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_input { uint16_t unused_0[3]; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_vnic_rss_cos_lb_ctx_free_output { uint16_t error_code; /* @@ -4607,11 +8065,187 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output { */ } __attribute__((packed)); +/* hwrm_vnic_tpa_cfg */ +/* Description: This function is used to enable/configure TPA on the VNIC. */ +/* Input (40 bytes) */ +struct hwrm_vnic_tpa_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t flags; + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) of non-tunneled TCP + * packets. 
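A matching sketch for hwrm_vnic_tpa_cfg: enable plain and tunneled TPA with GRO semantics and bound the aggregation using the log2-encoded limits defined just below. The chosen limits are illustrative, and the send path is again assumed.

#include <string.h>
#include <rte_byteorder.h>

static void
vnic_tpa_enable_sketch(struct hwrm_vnic_tpa_cfg_input *req, uint16_t vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->vnic_id = rte_cpu_to_le_16(vnic_id);
	req->flags = rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				      HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				      HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO);
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
		HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS);
	/* Log2 encodings: 8 TCP segments per aggregation, 16 contexts. */
	req->max_agg_segs =
		rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8);
	req->max_aggs = rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16);
}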
+ */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) of tunneled TCP packets. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) according to Windows + * Receive Segment Coalescing (RSC) rules. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) according to Linux + * Generic Receive Offload (GRO) rules. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) for TCP packets with IP + * ECN set to non-zero. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) for GRE tunneled TCP + * packets only if all packets have the same GRE sequence. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ + UINT32_C(0x20) + /* + * When this bit is '1' and the GRO mode is enabled, the VNIC + * shall be configured to perform transparent packet aggregation + * (TPA) for TCP/IPv4 packets with consecutively increasing + * IPIDs. In other words, the last packet that is being + * aggregated to an already existing aggregation context shall + * have IPID 1 more than the IPID of the last packet that was + * aggregated in that aggregation context. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40) + /* + * When this bit is '1' and the GRO mode is enabled, the VNIC + * shall be configured to perform transparent packet aggregation + * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit + * (IPv6) value. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80) + uint32_t enables; + /* This bit must be '1' for the max_agg_segs field to be configured. */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1) + /* This bit must be '1' for the max_aggs field to be configured. */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2) + /* + * This bit must be '1' for the max_agg_timer field to be + * configured. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4) + /* This bit must be '1' for the min_agg_len field to be configured. */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8) + uint16_t vnic_id; + /* Logical vnic ID */ + uint16_t max_agg_segs; + /* + * This is the maximum number of TCP segments that can be + * aggregated (unit is Log2). Max value is 31. + */ + /* 1 segment */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) + /* 2 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) + /* 4 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) + /* 8 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) + /* Any segment size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) + uint16_t max_aggs; + /* + * This is the maximum number of aggregations this VNIC is + * allowed (unit is Log2). 
Max value is 7 + */ + /* 1 aggregation */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0) + /* 2 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1) + /* 4 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2) + /* 8 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3) + /* 16 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4) + /* Any aggregation size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7) + uint8_t unused_0; + uint8_t unused_1; + uint32_t max_agg_timer; + /* + * This is the maximum amount of time allowed for an aggregation + * context to complete after it was initiated. + */ + uint32_t min_agg_len; + /* + * This is the minimum amount of payload length required to + * start an aggregation context. + */ +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_vnic_tpa_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + /* hwrm_ring_alloc */ /* * Description: This command allocates and does basic preparation for a ring. */ -/* Input (80 bytes) */ +/* Input (80 bytes) */ struct hwrm_ring_alloc_input { uint16_t req_type; /* @@ -4657,12 +8291,14 @@ struct hwrm_ring_alloc_input { #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20) uint8_t ring_type; /* Ring Type. */ - /* Completion Ring (CR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL UINT32_C(0x0) - /* TX Ring (TR) */ + /* L2 Completion Ring (CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1) - /* RX Ring (RR) */ + /* RX Ring (RR) */ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) uint8_t unused_0; uint16_t unused_1; uint64_t page_tbl_addr; @@ -4725,23 +8361,22 @@ struct hwrm_ring_alloc_input { * a TX ring. */ /* Arbitration policy used for the ring. */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \ - UINT32_C(0xf) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK UINT32_C(0xf) #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0 /* * Use strict priority for the TX ring. Priority * value is specified in arb_policy_param */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \ (UINT32_C(0x1) << 0) /* * Use weighted fair queue arbitration for the * TX ring. 
Weight is specified in * arb_policy_param */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \ (UINT32_C(0x2) << 0) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \ RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ /* Reserved field. */ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK UINT32_C(0xf0) @@ -4758,7 +8393,7 @@ struct hwrm_ring_alloc_input { */ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \ UINT32_C(0xff00) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 uint8_t unused_6; uint8_t unused_7; uint32_t reserved3; @@ -4778,26 +8413,40 @@ struct hwrm_ring_alloc_input { * translate this value into byte counter and time interval used * for this ring inside the device. */ - /* Bandwidth value */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \ - UINT32_C(0xfffffff) + /* The bandwidth value. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff) #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0 - /* Reserved */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_RSVD UINT32_C(0x10000000) + /* The granularity of the value (bits or bytes). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \ + RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES /* bw_value_unit is 3 b */ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mbps */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MBPS \ + /* Value is in Mb or MB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID uint8_t int_mode; /* @@ -4817,7 +8466,7 @@ struct hwrm_ring_alloc_input { uint8_t unused_8[3]; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_ring_alloc_output { uint16_t error_code; /* @@ -4858,8 +8507,16 @@ struct hwrm_ring_alloc_output { /* hwrm_ring_free */ /* * Description: This command is used to free a ring and associated resources. + * With QoS and DCBx agents, it is possible the traffic classes will be moved + * from one CoS queue to another. 
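The max_bw word above packs a 28-bit bandwidth value, a scale bit, and a 3-bit unit code. A sketch of composing it for the 1/100th-of-a-percent unit (illustrative only; a caller would also set HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID in enables for the field to take effect):

static uint32_t bnxt_make_max_bw_percent(uint32_t hundredths_of_percent)
{
        uint32_t bw;

        /* 28-bit bandwidth value in the low bits */
        bw = hundredths_of_percent &
             HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK;
        /* unit code 0x1: 1/100th of a percent of total bandwidth,
         * e.g. 2500 == 25% of port bandwidth */
        bw |= HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100;
        return bw;
}
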
When this occurs, the driver shall call + * 'hwrm_ring_free' to free the allocated rings and then call 'hwrm_ring_alloc' + * to re-allocate each ring and assign it to a new CoS queue. hwrm_ring_free + * shall be called on a ring only after it has been idle for 500ms or more and + * no frames have been posted to the ring during this time. All frames queued + * for transmission shall be completed and at least 500ms time elapsed from the + * last completion before calling this command. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_ring_free_input { uint16_t req_type; /* @@ -4889,19 +8546,21 @@ struct hwrm_ring_free_input { */ uint8_t ring_type; /* Ring Type. */ - /* Completion Ring (CR) */ - #define HWRM_RING_FREE_INPUT_RING_TYPE_CMPL UINT32_C(0x0) - /* TX Ring (TR) */ + /* L2 Completion Ring (CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1) - /* RX Ring (RR) */ + /* RX Ring (RR) */ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) uint8_t unused_0; uint16_t ring_id; /* Physical number of ring allocated. */ uint32_t unused_1; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_ring_free_output { uint16_t error_code; /* @@ -4937,7 +8596,7 @@ struct hwrm_ring_free_output { /* * Description: This API allocates and does basic preparation for a ring group. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_ring_grp_alloc_input { uint16_t req_type; /* @@ -4972,7 +8631,7 @@ struct hwrm_ring_grp_alloc_input { uint16_t ar; /* * This value identifies the aggregation RR associated with the - * ring group. If this value is 0xFF... (All Fs), then no + * ring group. If this value is 0xFF... (All Fs), then no * Aggregation ring will be set. */ uint16_t sc; @@ -4982,7 +8641,7 @@ struct hwrm_ring_grp_alloc_input { */ } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_ring_grp_alloc_output { uint16_t error_code; /* @@ -5028,7 +8687,7 @@ struct hwrm_ring_grp_alloc_output { * a part of executing this command, the HWRM shall reset all associated ring * group resources. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_ring_grp_free_input { uint16_t req_type; /* @@ -5061,7 +8720,7 @@ struct hwrm_ring_grp_free_input { uint32_t unused_0; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_ring_grp_free_output { uint16_t error_code; /* @@ -5095,12 +8754,61 @@ struct hwrm_ring_grp_free_output { /* hwrm_cfa_l2_filter_alloc */ /* - * A filter is used to identify traffic that contains a matching set of - * parameters like unicast or broadcast MAC address or a VLAN tag amongst - * other things which then allows the ASIC to direct the incoming traffic - * to an appropriate VNIC or Rx ring. + * Description: An L2 filter is a filter resource that is used to identify a + * vnic or ring for a packet based on layer 2 fields. Layer 2 fields for + * encapsulated packets include both outer L2 header and/or inner l2 header of + * encapsulated packet. The L2 filter resource covers the following OS specific + * L2 filters. 
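Per the description above, a ring may only be freed after it has been idle for 500ms or more with all posted frames completed. A sketch of filling the free request itself (illustrative only; rte_byteorder.h and a pre-zeroed request buffer are assumed, and the quiesce wait is the caller's responsibility):

/* Sketch: the ring must have been idle, with all TX completions seen,
 * for at least 500ms before this request is sent. */
static void bnxt_fill_ring_free(struct hwrm_ring_free_input *req,
                                uint16_t ring_id)
{
        req->ring_type = HWRM_RING_FREE_INPUT_RING_TYPE_TX;
        req->ring_id = rte_cpu_to_le_16(ring_id);
}
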
Linux/FreeBSD (per function): # Broadcast enable/disable # List + * of individual multicast filters # All multicast enable/disable filter # + * Unicast filters # Promiscuous mode VMware: # Broadcast enable/disable (per + * physical function) # All multicast enable/disable (per function) # Unicast + * filters per ring or vnic # Promiscuous mode per PF Windows: # Broadcast + * enable/disable (per physical function) # List of individual multicast filters + * (Driver needs to advertise the maximum number of filters supported) # All + * multicast enable/disable per physical function # Unicast filters per vnic # + * Promiscuous mode per PF Implementation notes on the use of VNIC in this + * command: # By default, these filters belong to default vnic for the function. + * # Once these filters are set up, only destination VNIC can be modified. # If + * the destination VNIC is not specified in this command, then the HWRM shall + * only create an l2 context id. HWRM Implementation notes for multicast + * filters: # The hwrm_filter_alloc command can be used to set up multicast + * filters (perfect match or partial match). Each individual function driver can + * set up multicast filters independently. # The HWRM needs to keep track of + * multicast filters set up by function drivers and maintain multicast group + * replication records to enable a subset of functions to receive traffic for a + * specific multicast address. # When a specific multicast filter cannot be set, + * the HWRM shall return an error. In this error case, the driver should fall + * back to using one general filter (rather than specific) for all multicast + * traffic. # When the SR-IOV is enabled, the HWRM needs to additionally track + * source knockout per multicast group record. Examples of setting unicast + * filters: For a unicast MAC based filter, one can use a combination of the + * fields and masks provided in this command to set up the filter. Below are + * some examples: # MAC + no VLAN filter: This filter is used to identify + * traffic that does not contain any VLAN tags and matches destination (or + * source) MAC address. This filter can be set up by setting only l2_addr field + * to be a valid field. All other fields are not valid. The following value is + * set for l2_addr. l2_addr = MAC # MAC + Any VLAN filter: This filter is used + * to identify traffic that carries single VLAN tag and matches (destination or + * source) MAC address. This filter can be set up by setting only l2_addr and + * l2_ovlan_mask fields to be valid fields. All other fields are not valid. The + * following values are set for those two valid fields. l2_addr = MAC, + * l2_ovlan_mask = 0xFFFF # MAC + no VLAN or VLAN ID=0: This filter is used to + * identify untagged traffic that does not contain any VLAN tags or a VLAN tag + * with VLAN ID = 0 and matches destination (or source) MAC address. This filter + * can be set up by setting only l2_addr and l2_ovlan fields to be valid fields. + * All other fields are not valid. The following value are set for l2_addr and + * l2_ovlan. l2_addr = MAC, l2_ovlan = 0x0 # MAC + no VLAN or any VLAN: This + * filter is used to identify traffic that contains zero or 1 VLAN tag and + * matches destination (or source) MAC address. This filter can be set up by + * setting only l2_addr, l2_ovlan, and l2_mask fields to be valid fields. All + * other fields are not valid. The following value are set for l2_addr, + * l2_ovlan, and l2_mask fields. 
l2_addr = MAC, l2_ovlan = 0x0, l2_ovlan_mask = + * 0xFFFF # MAC + VLAN ID filter: This filter can be set up by setting only + * l2_addr, l2_ovlan, and l2_ovlan_mask fields to be valid fields. All other + * fields are not valid. The following values are set for those three valid + * fields. l2_addr = MAC, l2_ovlan = VLAN ID, l2_ovlan_mask = 0xF000 */ -/* Input (96 bytes) */ +/* Input (96 bytes) */ struct hwrm_cfa_l2_filter_alloc_input { uint16_t req_type; /* @@ -5136,10 +8844,12 @@ struct hwrm_cfa_l2_filter_alloc_input { */ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) /* tx path */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \ + (UINT32_C(0x0) << 0) /* rx path */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0) - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \ + (UINT32_C(0x1) << 0) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \ CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX /* * Setting of this flag indicates the applicability to the @@ -5156,60 +8866,70 @@ struct hwrm_cfa_l2_filter_alloc_input { * should not be specified. If this flag is set, then l2_* * fields refer to fields of outermost L2 header. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8) uint32_t enables; /* This bit must be '1' for the l2_addr field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1) /* This bit must be '1' for the l2_addr_mask field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK UINT32_C(0x2) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \ + UINT32_C(0x2) /* This bit must be '1' for the l2_ovlan field to be configured. */ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4) /* * This bit must be '1' for the l2_ovlan_mask field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK UINT32_C(0x8) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \ + UINT32_C(0x8) /* This bit must be '1' for the l2_ivlan field to be configured. */ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10) /* * This bit must be '1' for the l2_ivlan_mask field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK UINT32_C(0x20) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \ + UINT32_C(0x20) /* This bit must be '1' for the t_l2_addr field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40) /* * This bit must be '1' for the t_l2_addr_mask field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK UINT32_C(0x80) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \ + UINT32_C(0x80) /* This bit must be '1' for the t_l2_ovlan field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN UINT32_C(0x100) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \ + UINT32_C(0x100) /* * This bit must be '1' for the t_l2_ovlan_mask field to be * configured. 
*/ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK UINT32_C(0x200) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \ + UINT32_C(0x200) /* This bit must be '1' for the t_l2_ivlan field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN UINT32_C(0x400) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \ + UINT32_C(0x400) /* * This bit must be '1' for the t_l2_ivlan_mask field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK UINT32_C(0x800) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \ + UINT32_C(0x800) /* This bit must be '1' for the src_type field to be configured. */ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000) /* This bit must be '1' for the src_id field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000) /* This bit must be '1' for the tunnel_type field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x4000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4000) /* This bit must be '1' for the dst_id field to be configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000) /* * This bit must be '1' for the mirror_vnic_id field to be * configured. */ - #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID UINT32_C(0x10000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x10000) uint8_t l2_addr[6]; /* * This value sets the match value for the L2 MAC address. @@ -5297,34 +9017,38 @@ struct hwrm_cfa_l2_filter_alloc_input { uint8_t tunnel_type; /* Tunnel Type. 
 */
 	/* Non-tunnel */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL UINT32_C(0x0)
-	/* Virtual eXtensible Local Area Network (VXLAN) */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+		UINT32_C(0x0)
+	/* Virtual eXtensible Local Area Network (VXLAN) */
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+		UINT32_C(0x1)
 	/*
 	 * Network Virtualization Generic Routing
-	 * Encapsulation (NVGRE)
+	 * Encapsulation (NVGRE)
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2)
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+		UINT32_C(0x2)
 	/*
-	 * Generic Routing Encapsulation (GRE) inside
+	 * Generic Routing Encapsulation (GRE) inside
 	 * Ethernet payload
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
 	/* IP in IP */
 	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
-	/* Generic Network Virtualization Encapsulation (Geneve) */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
-	/* Multi-Protocol Label Switching (MPLS) */
+	/* Generic Network Virtualization Encapsulation (Geneve) */
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
 	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
-	/* Stateless Transport Tunnel (STT) */
+	/* Stateless Transport Tunnel (STT) */
 	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
 	/*
-	 * Generic Routing Encapsulation (GRE) inside IP
+	 * Generic Routing Encapsulation (GRE) inside IP
 	 * datagram payload
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
 	/* Any tunneled traffic */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL UINT32_C(0xff)
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+		UINT32_C(0xff)
 	uint8_t unused_7;
 	uint16_t dst_id;
 	/*
@@ -5340,11 +9064,14 @@ struct hwrm_cfa_l2_filter_alloc_input {
 	 * filter table.
 	 */
 	/* No preference */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER UINT32_C(0x0)
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
+		UINT32_C(0x0)
 	/* Above the given filter */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER UINT32_C(0x1)
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \
+		UINT32_C(0x1)
 	/* Below the given filter */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER UINT32_C(0x2)
+	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
+		UINT32_C(0x2)
 	/* As high as possible */
 	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX UINT32_C(0x3)
 	/* As low as possible */
@@ -5359,7 +9086,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
 	 */
 } __attribute__((packed));

-/* Output (24 bytes) */
+/* Output (24 bytes) */
 struct hwrm_cfa_l2_filter_alloc_output {
 	uint16_t error_code;
 	/*
@@ -5407,7 +9134,7 @@ struct hwrm_cfa_l2_filter_alloc_output {
 * Description: Free an L2 filter. The HWRM shall free all associated filter
 * resources with the L2 filter.
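The "MAC + VLAN ID" recipe from the allocation description maps onto the request fields as shown below. A sketch assuming <string.h>, rte_byteorder.h, a pre-zeroed request, and the l2_ovlan/l2_ovlan_mask fields elided by the hunk above (their names come from the description text, not from lines shown here):

/* Sketch: perfect-match RX filter on destination MAC + outer VLAN ID,
 * following the description's recipe (l2_ovlan_mask = 0xF000). */
static void bnxt_fill_mac_vlan_filter(
        struct hwrm_cfa_l2_filter_alloc_input *req,
        const uint8_t mac[6], uint16_t vlan_id)
{
        req->flags = rte_cpu_to_le_32(
                HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
        req->enables = rte_cpu_to_le_32(
                HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
                HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK);
        memcpy(req->l2_addr, mac, 6);
        req->l2_ovlan = rte_cpu_to_le_16(vlan_id);      /* assumed field */
        req->l2_ovlan_mask = rte_cpu_to_le_16(0xf000);  /* assumed field */
}
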
*/ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_cfa_l2_filter_free_input { uint16_t req_type; /* @@ -5442,7 +9169,7 @@ struct hwrm_cfa_l2_filter_free_input { */ } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_cfa_l2_filter_free_output { uint16_t error_code; /* @@ -5476,7 +9203,7 @@ struct hwrm_cfa_l2_filter_free_output { /* hwrm_cfa_l2_filter_cfg */ /* Description: Change the configuration of an existing L2 filter */ -/* Input (40 bytes) */ +/* Input (40 bytes) */ struct hwrm_cfa_l2_filter_cfg_input { uint16_t req_type; /* @@ -5512,10 +9239,12 @@ struct hwrm_cfa_l2_filter_cfg_input { */ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) /* tx path */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0) + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \ + (UINT32_C(0x0) << 0) /* rx path */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0) - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \ + (UINT32_C(0x1) << 0) + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \ CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX /* * Setting of this flag indicates drop action. If this flag is @@ -5529,7 +9258,8 @@ struct hwrm_cfa_l2_filter_cfg_input { * This bit must be '1' for the new_mirror_vnic_id field to be * configured. */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID UINT32_C(0x2) + #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) uint64_t l2_filter_id; /* * This value identifies a set of CFA data structures used for @@ -5545,7 +9275,7 @@ struct hwrm_cfa_l2_filter_cfg_input { /* New Logical VNIC ID of the VNIC where traffic is mirrored. */ } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_cfa_l2_filter_cfg_output { uint16_t error_code; /* @@ -5579,7 +9309,7 @@ struct hwrm_cfa_l2_filter_cfg_output { /* hwrm_cfa_l2_set_rx_mask */ /* Description: This command will set rx mask of the function. */ -/* Input (56 bytes) */ +/* Input (56 bytes) */ struct hwrm_cfa_l2_set_rx_mask_input { uint16_t req_type; /* @@ -5632,14 +9362,14 @@ struct hwrm_cfa_l2_set_rx_mask_input { * the promiscuous mode. The HWRM should accept any function to * set up promiscuous mode. The HWRM shall follow the semantics * below for the promiscuous mode support. # When partitioning - * is not enabled on a port (i.e. single PF on the port), then + * is not enabled on a port (i.e. single PF on the port), then * the PF shall be allowed to be in the promiscuous mode. When * the PF is in the promiscuous mode, then it shall receive all * host bound traffic on that port. # When partitioning is - * enabled on a port (i.e. multiple PFs per port) and a PF on + * enabled on a port (i.e. multiple PFs per port) and a PF on * that port is in the promiscuous mode, then the PF receives * all traffic within that partition as identified by a unique - * identifier for the PF (e.g. S-Tag). If a unique outer VLAN + * identifier for the PF (e.g. S-Tag). If a unique outer VLAN * for the PF is specified, then the setting of promiscuous mode * on that PF shall result in the PF receiving all host bound * traffic with matching outer VLAN. # A VF shall can be set in @@ -5652,7 +9382,7 @@ struct hwrm_cfa_l2_set_rx_mask_input { * mode on a function independently from the promiscuous mode * settings on other functions. 
*/ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10) + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10) /* * If this flag is set, the corresponding RX filters shall be * set up to cover multicast/broadcast filters for the outermost @@ -5685,7 +9415,8 @@ struct hwrm_cfa_l2_set_rx_mask_input { * set at most one flag out of vlanonly, vlan_nonvlan, and * anyvlan_nonvlan. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN UINT32_C(0x100) + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \ + UINT32_C(0x100) uint64_t mc_tbl_addr; /* This is the address for mcast address tbl. */ uint32_t num_mc_entries; @@ -5708,7 +9439,7 @@ struct hwrm_cfa_l2_set_rx_mask_input { uint32_t unused_1; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_cfa_l2_set_rx_mask_output { uint16_t error_code; /* @@ -5740,12 +9471,364 @@ struct hwrm_cfa_l2_set_rx_mask_output { */ } __attribute__((packed)); +/* hwrm_cfa_vlan_antispoof_cfg */ +/* Description: Configures vlan anti-spoof filters for VF. */ +/* Input (32 bytes) */ +struct hwrm_cfa_vlan_antispoof_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint16_t fid; + /* + * Function ID of the function that is being configured. Only valid for + * a VF FID configured by the PF. + */ + uint8_t unused_0; + uint8_t unused_1; + uint32_t num_vlan_entries; + /* Number of VLAN entries in the vlan_tag_mask_tbl. */ + uint64_t vlan_tag_mask_tbl_addr; + /* + * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN antispoof + * table. Each table entry contains the 16-bit TPID (0x8100 or 0x88a8 + * only), 16-bit VLAN ID, and a 16-bit mask, all in network order to + * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, the mask + * value should be 0xfff for the 12-bit VLAN ID. + */ +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vlan_antispoof_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. 
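Each entry of the DMA'd antispoof table carries a big-endian TPID, VID, and mask, per the vlan_tag_mask_tbl_addr comment above. A sketch of one entry; the local struct layout is an assumption reconstructed from that comment, not a type from this header:

/* Sketch: one VLAN antispoof table entry, all fields in network order. */
struct vlan_antispoof_entry {
        uint16_t tpid;
        uint16_t vid;
        uint16_t mask;
} __attribute__((packed));

static void bnxt_fill_antispoof_entry(struct vlan_antispoof_entry *e,
                                      uint16_t vid)
{
        e->tpid = rte_cpu_to_be_16(0x8100);     /* or 0x88a8 */
        e->vid = rte_cpu_to_be_16(vid);
        e->mask = rte_cpu_to_be_16(0x0fff);     /* match all 12 VID bits */
}
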
This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ +}; + +/* hwrm_tunnel_dst_port_query */ +/* + * Description: This function is called by a driver to query tunnel type + * specific destination port configuration. + */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_query_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint8_t tunnel_type; + /* Tunnel Type. */ + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + uint8_t unused_0[7]; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_query_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint16_t tunnel_dst_port_id; + /* + * This field represents the identifier of L4 destination port + * used for the given tunnel type. This field is valid for + * specific tunnel types that use layer 4 (e.g. UDP) transports + * for tunneling. + */ + uint16_t tunnel_dst_port_val; + /* + * This field represents the value of L4 destination port + * identified by tunnel_dst_port_id. This field is valid for + * specific tunnel types that use layer 4 (e.g. UDP) transports + * for tunneling. This field is in network byte order. A value + * of 0 means that the destination port is not configured. + */ + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. 
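A sketch of consuming the query response above: tunnel_dst_port_val arrives in network byte order, and 0 means no port is configured for that tunnel type (illustrative only; rte_byteorder.h assumed):

static uint16_t bnxt_queried_vxlan_port(
        const struct hwrm_tunnel_dst_port_query_output *resp)
{
        /* 0 == no destination port configured */
        return rte_be_to_cpu_16(resp->tunnel_dst_port_val);
}
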
+ */ +} __attribute__((packed)); + +/* hwrm_tunnel_dst_port_alloc */ +/* + * Description: This function is called by a driver to allocate l4 destination + * port for a specific tunnel type. The destination port value is provided in + * the input. If the HWRM supports only one global destination port for a tunnel + * type, then the HWRM shall keep track of its usage as described below. # The + * first caller that allocates a destination port shall always succeed and the + * HWRM shall save the destination port configuration for that tunnel type and + * increment the usage count to 1. # Subsequent callers allocating the same + * destination port for that tunnel type shall succeed and the HWRM shall + * increment the usage count for that port for each subsequent caller that + * succeeds. # Any subsequent caller trying to allocate a different destination + * port for that tunnel type shall fail until the usage count for the original + * destination port goes to zero. # A caller that frees a port will cause the + * usage count for that port to decrement. + */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_alloc_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint8_t tunnel_type; + /* Tunnel Type. */ + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + uint8_t unused_0; + uint16_t tunnel_dst_port_val; + /* + * This field represents the value of L4 destination port used + * for the given tunnel type. This field is valid for specific + * tunnel types that use layer 4 (e.g. UDP) transports for + * tunneling. This field is in network byte order. A value of 0 + * shall fail the command. + */ + uint32_t unused_1; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_alloc_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint16_t tunnel_dst_port_id; + /* + * Identifier of a tunnel L4 destination port value. Only + * applies to tunnel types that has l4 destination port + * parameters. 
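Because the HWRM reference-counts the single global port per tunnel type, allocating the same value again succeeds while a conflicting value fails until the count drains to zero. A sketch of requesting the IANA VXLAN port, assuming a pre-zeroed request and rte_byteorder.h:

/* Sketch: request UDP port 4789 (the IANA-assigned VXLAN port).
 * The field is big-endian; 0 would fail the command. */
static void bnxt_fill_vxlan_alloc(struct hwrm_tunnel_dst_port_alloc_input *req)
{
        req->tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
        req->tunnel_dst_port_val = rte_cpu_to_be_16(4789);
}
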
+ */ + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t unused_4; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_tunnel_dst_port_free */ +/* + * Description: This function is called by a driver to free l4 destination port + * for a specific tunnel type. + */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_free_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint8_t tunnel_type; + /* Tunnel Type. */ + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5) + uint8_t unused_0; + uint16_t tunnel_dst_port_id; + /* + * Identifier of a tunnel L4 destination port value. Only + * applies to tunnel types that has l4 destination port + * parameters. + */ + uint32_t unused_1; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_free_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + /* hwrm_stat_ctx_alloc */ /* * Description: This command allocates and does basic preparation for a stat * context. 
*/ -/* Input (32 bytes) */ +/* Input (32 bytes) */ struct hwrm_stat_ctx_alloc_input { uint16_t req_type; /* @@ -5783,10 +9866,24 @@ struct hwrm_stat_ctx_alloc_input { * used. In this case, the stat block can only be read by * hwrm_stat_ctx_query command. */ - uint32_t unused_0; + uint8_t stat_ctx_flags; + /* + * This field is used to specify statistics context specific + * configuration flags. + */ + /* + * When this bit is set to '1', the statistics context shall be + * allocated for RoCE traffic only. In this case, traffic other + * than offloaded RoCE traffic shall not be included in this + * statistic context. When this bit is set to '0', the + * statistics context shall be used for the network traffic + * other than offloaded RoCE traffic. + */ + #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1) + uint8_t unused_0[3]; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_stat_ctx_alloc_output { uint16_t error_code; /* @@ -5821,7 +9918,7 @@ struct hwrm_stat_ctx_alloc_output { /* hwrm_stat_ctx_free */ /* Description: This command is used to free a stat context. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_stat_ctx_free_input { uint16_t req_type; /* @@ -5854,7 +9951,7 @@ struct hwrm_stat_ctx_free_input { uint32_t unused_0; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_stat_ctx_free_output { uint16_t error_code; /* @@ -5889,7 +9986,7 @@ struct hwrm_stat_ctx_free_output { /* hwrm_stat_ctx_clr_stats */ /* Description: This command clears statistics of a context. */ -/* Input (24 bytes) */ +/* Input (24 bytes) */ struct hwrm_stat_ctx_clr_stats_input { uint16_t req_type; /* @@ -5922,7 +10019,7 @@ struct hwrm_stat_ctx_clr_stats_input { uint32_t unused_0; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_stat_ctx_clr_stats_output { uint16_t error_code; /* @@ -5954,6 +10051,113 @@ struct hwrm_stat_ctx_clr_stats_output { */ } __attribute__((packed)); +/* hwrm_stat_ctx_query */ +/* Description: This command returns statistics of a context. */ +/* Input (24 bytes) */ + +struct hwrm_stat_ctx_query_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint32_t stat_ctx_id; + /* ID of the statistics context that is being queried. */ + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (176 bytes) */ + +struct hwrm_stat_ctx_query_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. 
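A sketch of scoping a newly allocated statistics context via the stat_ctx_flags field above (illustrative only; <stdbool.h> and a pre-zeroed request are assumed):

/* Sketch: leaving stat_ctx_flags at 0 selects ordinary (non-RoCE)
 * network traffic for this context. */
static void bnxt_set_stats_scope(struct hwrm_stat_ctx_alloc_input *req,
                                 bool roce_only)
{
        req->stat_ctx_flags = roce_only ?
                HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE : 0;
}
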
*/ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint64_t tx_ucast_pkts; + /* Number of transmitted unicast packets */ + uint64_t tx_mcast_pkts; + /* Number of transmitted multicast packets */ + uint64_t tx_bcast_pkts; + /* Number of transmitted broadcast packets */ + uint64_t tx_err_pkts; + /* Number of transmitted packets with error */ + uint64_t tx_drop_pkts; + /* Number of dropped packets on transmit path */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for unicast traffic */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t tx_bcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t rx_ucast_pkts; + /* Number of received unicast packets */ + uint64_t rx_mcast_pkts; + /* Number of received multicast packets */ + uint64_t rx_bcast_pkts; + /* Number of received broadcast packets */ + uint64_t rx_err_pkts; + /* Number of received packets with error */ + uint64_t rx_drop_pkts; + /* Number of dropped packets on received path */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for unicast traffic */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for multicast traffic */ + uint64_t rx_bcast_bytes; + /* Number of received bytes for broadcast traffic */ + uint64_t rx_agg_pkts; + /* Number of aggregated unicast packets */ + uint64_t rx_agg_bytes; + /* Number of aggregated unicast bytes */ + uint64_t rx_agg_events; + /* Number of aggregation events */ + uint64_t rx_agg_aborts; + /* Number of aborted aggregations */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ +} __attribute__((packed)); + /* hwrm_exec_fwd_resp */ /* * Description: This command is used to send an encapsulated request to the @@ -5964,7 +10168,7 @@ struct hwrm_stat_ctx_clr_stats_output { * acknowledge the receipt of the encapsulated request and forwarding of the * response. */ -/* Input (128 bytes) */ +/* Input (128 bytes) */ struct hwrm_exec_fwd_resp_input { uint16_t req_type; /* @@ -6008,7 +10212,7 @@ struct hwrm_exec_fwd_resp_input { uint16_t unused_0[3]; } __attribute__((packed)); -/* Output (16 bytes) */ +/* Output (16 bytes) */ struct hwrm_exec_fwd_resp_output { uint16_t error_code; /* @@ -6040,4 +10244,631 @@ struct hwrm_exec_fwd_resp_output { */ } __attribute__((packed)); -#endif +/* hwrm_reject_fwd_resp */ +/* + * Description: This command is used to send an encapsulated request to the + * HWRM. This command instructs the HWRM to reject the request and forward the + * error response of the encapsulated request to the location specified in the + * original request that is encapsulated. The target id of this command shall be + * set to 0xFFFF (HWRM). The response location in this command shall be used to + * acknowledge the receipt of the encapsulated request and forwarding of the + * response. 
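hwrm_exec_fwd_resp and the hwrm_reject_fwd_resp command defined next are the two possible PF verdicts on a VF's encapsulated request. A sketch of the dispatch; both send helpers are hypothetical wrappers, not functions from this driver (<stdbool.h> assumed):

/* Sketch: a PF policy hook either executes or rejects a VF's
 * encapsulated HWRM request. */
static int bnxt_vf_req_verdict(const uint32_t encap_req[26], bool allowed)
{
        if (allowed)
                return hwrm_send_exec_fwd_resp(encap_req);   /* hypothetical */
        return hwrm_send_reject_fwd_resp(encap_req);         /* hypothetical */
}
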
+ */ +/* Input (128 bytes) */ +struct hwrm_reject_fwd_resp_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t encap_request[26]; + /* + * This is an encapsulated request. This request should be + * rejected by the HWRM and the error response should be + * provided in the response buffer inside the encapsulated + * request. + */ + uint16_t encap_resp_target_id; + /* + * This value indicates the target id of the response to the + * encapsulated request. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - + * HWRM + */ + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_reject_fwd_resp_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* Hardware Resource Manager Specification */ +/* Description: This structure is used to specify port description. */ +/* + * Note: The Hardware Resource Manager (HWRM) manages various hardware resources + * inside the chip. The HWRM is implemented in firmware, and runs on embedded + * processors inside the chip. This firmware service is vital part of the chip. + * The chip can not be used by a driver or HWRM client without the HWRM. + */ +/* Input (16 bytes) */ +struct input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. 
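Every HWRM request begins with this common header; a sketch of filling it, using the remaining fields shown just below (illustrative only; rte_byteorder.h assumed, and the 0xFFFF values follow the field comments):

/* Sketch: cmpl_ring -1 means no CR completion; target_id 0xFFFF
 * addresses the HWRM itself. */
static void bnxt_fill_req_hdr(struct input *req, uint16_t req_type,
                              uint16_t seq_id, uint64_t resp_dma)
{
        req->req_type = rte_cpu_to_le_16(req_type);
        req->cmpl_ring = rte_cpu_to_le_16(0xffff);
        req->seq_id = rte_cpu_to_le_16(seq_id);
        req->target_id = rte_cpu_to_le_16(0xffff);
        req->resp_addr = rte_cpu_to_le_64(resp_dma); /* 16B aligned, zeroed */
}
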
+ */
+	uint16_t seq_id;
+	/* This value indicates the command sequence number. */
+	uint16_t target_id;
+	/*
+	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+	 * - HWRM
+	 */
+	uint64_t resp_addr;
+	/*
+	 * This is the host address where the response will be written
+	 * when the request is complete. This area must be 16B aligned
+	 * and must be cleared to zero before the request is made.
+	 */
+} __attribute__((packed));
+
+/* Output (8 bytes) */
+struct output {
+	uint16_t error_code;
+	/*
+	 * Pass/Fail or error type Note: receiver to verify the in
+	 * parameters, and fail the call with an error when appropriate
+	 */
+	uint16_t req_type;
+	/* This field returns the type of original request. */
+	uint16_t seq_id;
+	/* This field provides original sequence number of the command. */
+	uint16_t resp_len;
+	/*
+	 * This field is the length of the response in bytes. The last
+	 * byte of the response is a valid flag that will read as '1'
+	 * when the command has been completely written to memory.
+	 */
+} __attribute__((packed));
+
+/* Short Command Structure (16 bytes) */
+struct hwrm_short_input {
+	uint16_t req_type;
+	uint16_t signature;
+	#define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD	(UINT32_C(0x4321))
+	uint16_t unused_0;
+	uint16_t size;
+	uint64_t req_addr;
+} __attribute__((packed));
+
+#define HWRM_GET_HWRM_ERROR_CODE(arg) \
+	({ \
+		typeof(arg) x = (arg); \
+		((x) == 0xf ? "HWRM_ERROR" : \
+		((x) == 0xffff ? "CMD_NOT_SUPPORTED" : \
+		((x) == 0xfffe ? "UNKNOWN_ERR" : \
+		((x) == 0x4 ? "RESOURCE_ALLOC_ERROR" : \
+		((x) == 0x5 ? "INVALID_FLAGS" : \
+		((x) == 0x6 ? "INVALID_ENABLES" : \
+		((x) == 0x0 ? "SUCCESS" : \
+		((x) == 0x1 ? "FAIL" : \
+		((x) == 0x2 ? "INVALID_PARAMS" : \
+		((x) == 0x3 ? "RESOURCE_ACCESS_DENIED" : \
+		"Unknown error_code")))))))))); \
+	})
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+	uint16_t error_code;
+	/* These are numbers assigned to return/error codes. */
+	/* Request was successfully executed by the HWRM. */
+	#define HWRM_ERR_CODE_SUCCESS	(UINT32_C(0x0))
+	/* The HWRM failed to execute the request. */
+	#define HWRM_ERR_CODE_FAIL	(UINT32_C(0x1))
+	/*
+	 * The request contains invalid argument(s) or
+	 * input parameters.
+	 */
+	#define HWRM_ERR_CODE_INVALID_PARAMS	(UINT32_C(0x2))
+	/*
+	 * The requester is not allowed to access the
+	 * requested resource. This error code shall be
+	 * provided in a response to a request to query
+	 * or modify an existing resource that is not
+	 * accessible by the requester.
+	 */
+	#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED	(UINT32_C(0x3))
+	/*
+	 * The HWRM is unable to allocate the requested
+	 * resource. This code only applies to requests
+	 * for HWRM resource allocations.
+	 */
+	#define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR	(UINT32_C(0x4))
+	/* Invalid combination of flags is specified in the request. */
+	#define HWRM_ERR_CODE_INVALID_FLAGS	(UINT32_C(0x5))
+	/*
+	 * Invalid combination of enables fields is
+	 * specified in the request.
+	 */
+	#define HWRM_ERR_CODE_INVALID_ENABLES	(UINT32_C(0x6))
+	/*
+	 * Generic HWRM execution error that represents
+	 * an internal error.
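A usage sketch for the error-name macro above, assuming <stdio.h> and an error_code already converted to host byte order:

/* Sketch: log a symbolic name for a non-success completion. */
static int bnxt_log_hwrm_rc(uint16_t rc)
{
        if (rc == HWRM_ERR_CODE_SUCCESS)
                return 0;
        printf("hwrm: %s\n", HWRM_GET_HWRM_ERROR_CODE(rc));
        return -1;
}
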
+ */
+	#define HWRM_ERR_CODE_HWRM_ERROR	(UINT32_C(0xf))
+	/* Unknown error */
+	#define HWRM_ERR_CODE_UNKNOWN_ERR	(UINT32_C(0xfffe))
+	/* Unsupported or invalid command */
+	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED	(UINT32_C(0xffff))
+	uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+	uint16_t error_code;
+	/*
+	 * Pass/Fail or error type Note: receiver to verify the in
+	 * parameters, and fail the call with an error when appropriate
+	 */
+	uint16_t req_type;
+	/* This field returns the type of original request. */
+	uint16_t seq_id;
+	/* This field provides original sequence number of the command. */
+	uint16_t resp_len;
+	/*
+	 * This field is the length of the response in bytes. The last
+	 * byte of the response is a valid flag that will read as '1'
+	 * when the command has been completely written to memory.
+	 */
+	uint32_t opaque_0;
+	/* debug info for this error response. */
+	uint16_t opaque_1;
+	/* debug info for this error response. */
+	uint8_t cmd_err;
+	/*
+	 * In the case of an error response, command specific error code
+	 * is returned in this field.
+	 */
+	uint8_t valid;
+	/*
+	 * This field is used in Output records to indicate that the
+	 * output is completely written to RAM. This field should be
+	 * read as '1' to indicate that the output has been completely
+	 * written. When writing a command completion or response to an
+	 * internal processor, the order of writes has to be such that
+	 * this field is written last.
+	 */
+} __attribute__((packed));
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+	uint64_t tx_64b_frames;
+	/* Total Number of 64 Bytes frames transmitted */
+	uint64_t tx_65b_127b_frames;
+	/* Total Number of 65-127 Bytes frames transmitted */
+	uint64_t tx_128b_255b_frames;
+	/* Total Number of 128-255 Bytes frames transmitted */
+	uint64_t tx_256b_511b_frames;
+	/* Total Number of 256-511 Bytes frames transmitted */
+	uint64_t tx_512b_1023b_frames;
+	/* Total Number of 512-1023 Bytes frames transmitted */
+	uint64_t tx_1024b_1518_frames;
+	/* Total Number of 1024-1518 Bytes frames transmitted */
+	uint64_t tx_good_vlan_frames;
+	/*
+	 * Total Number of each good VLAN (excludes FCS errors) frame
+	 * transmitted which is 1519 to 1522 bytes in length inclusive
+	 * (excluding framing bits but including FCS bytes).
+ */ + uint64_t tx_1519b_2047_frames; + /* Total Number of 1519-2047 Bytes frames transmitted */ + uint64_t tx_2048b_4095b_frames; + /* Total Number of 2048-4095 Bytes frames transmitted */ + uint64_t tx_4096b_9216b_frames; + /* Total Number of 4096-9216 Bytes frames transmitted */ + uint64_t tx_9217b_16383b_frames; + /* Total Number of 9217-16383 Bytes frames transmitted */ + uint64_t tx_good_frames; + /* Total Number of good frames transmitted */ + uint64_t tx_total_frames; + /* Total Number of frames transmitted */ + uint64_t tx_ucast_frames; + /* Total number of unicast frames transmitted */ + uint64_t tx_mcast_frames; + /* Total number of multicast frames transmitted */ + uint64_t tx_bcast_frames; + /* Total number of broadcast frames transmitted */ + uint64_t tx_pause_frames; + /* Total number of PAUSE control frames transmitted */ + uint64_t tx_pfc_frames; + /* Total number of PFC/per-priority PAUSE control frames transmitted */ + uint64_t tx_jabber_frames; + /* Total number of jabber frames transmitted */ + uint64_t tx_fcs_err_frames; + /* Total number of frames transmitted with FCS error */ + uint64_t tx_control_frames; + /* Total number of control frames transmitted */ + uint64_t tx_oversz_frames; + /* Total number of over-sized frames transmitted */ + uint64_t tx_single_dfrl_frames; + /* Total number of frames with single deferral */ + uint64_t tx_multi_dfrl_frames; + /* Total number of frames with multiple deferrals */ + uint64_t tx_single_coll_frames; + /* Total number of frames with single collision */ + uint64_t tx_multi_coll_frames; + /* Total number of frames with multiple collisions */ + uint64_t tx_late_coll_frames; + /* Total number of frames with late collisions */ + uint64_t tx_excessive_coll_frames; + /* Total number of frames with excessive collisions */ + uint64_t tx_frag_frames; + /* Total number of fragmented frames transmitted */ + uint64_t tx_err; + /* Total number of transmit errors */ + uint64_t tx_tagged_frames; + /* Total number of single VLAN tagged frames transmitted */ + uint64_t tx_dbl_tagged_frames; + /* Total number of double VLAN tagged frames transmitted */ + uint64_t tx_runt_frames; + /* Total number of runt frames transmitted */ + uint64_t tx_fifo_underruns; + /* Total number of TX FIFO under runs */ + uint64_t tx_pfc_ena_frames_pri0; + /* + * Total number of PFC frames with PFC enabled bit for Pri 0 + * transmitted + */ + uint64_t tx_pfc_ena_frames_pri1; + /* + * Total number of PFC frames with PFC enabled bit for Pri 1 + * transmitted + */ + uint64_t tx_pfc_ena_frames_pri2; + /* + * Total number of PFC frames with PFC enabled bit for Pri 2 + * transmitted + */ + uint64_t tx_pfc_ena_frames_pri3; + /* + * Total number of PFC frames with PFC enabled bit for Pri 3 + * transmitted + */ + uint64_t tx_pfc_ena_frames_pri4; + /* + * Total number of PFC frames with PFC enabled bit for Pri 4 + * transmitted + */ + uint64_t tx_pfc_ena_frames_pri5; + /* + * Total number of PFC frames with PFC enabled bit for Pri 5 + * transmitted + */ + uint64_t tx_pfc_ena_frames_pri6; + /* + * Total number of PFC frames with PFC enabled bit for Pri 6 + * transmitted + */ + uint64_t tx_pfc_ena_frames_pri7; + /* + * Total number of PFC frames with PFC enabled bit for Pri 7 + * transmitted + */ + uint64_t tx_eee_lpi_events; + /* Total number of EEE LPI Events on TX */ + uint64_t tx_eee_lpi_duration; + /* EEE LPI Duration Counter on TX */ + uint64_t tx_llfc_logical_msgs; + /* + * Total number of Link Level Flow Control (LLFC) messages + * transmitted + */ + uint64_t 
tx_hcfc_msgs;
+	/* Total number of HCFC messages transmitted */
+	uint64_t tx_total_collisions;
+	/* Total number of TX collisions */
+	uint64_t tx_bytes;
+	/* Total number of transmitted bytes */
+	uint64_t tx_xthol_frames;
+	/* Total number of end-to-end HOL frames */
+	uint64_t tx_stat_discard;
+	/* Total Tx Drops per Port reported by STATS block */
+	uint64_t tx_stat_error;
+	/* Total Tx Error Drops per Port reported by STATS block */
+} __attribute__((packed));
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+	uint64_t rx_64b_frames;
+	/* Total Number of 64 Bytes frames received */
+	uint64_t rx_65b_127b_frames;
+	/* Total Number of 65-127 Bytes frames received */
+	uint64_t rx_128b_255b_frames;
+	/* Total Number of 128-255 Bytes frames received */
+	uint64_t rx_256b_511b_frames;
+	/* Total Number of 256-511 Bytes frames received */
+	uint64_t rx_512b_1023b_frames;
+	/* Total Number of 512-1023 Bytes frames received */
+	uint64_t rx_1024b_1518_frames;
+	/* Total Number of 1024-1518 Bytes frames received */
+	uint64_t rx_good_vlan_frames;
+	/*
+	 * Total Number of each good VLAN (excludes FCS errors) frame
+	 * received which is 1519 to 1522 bytes in length inclusive
+	 * (excluding framing bits but including FCS bytes).
+	 */
+	uint64_t rx_1519b_2047b_frames;
+	/* Total Number of 1519-2047 Bytes frames received */
+	uint64_t rx_2048b_4095b_frames;
+	/* Total Number of 2048-4095 Bytes frames received */
+	uint64_t rx_4096b_9216b_frames;
+	/* Total Number of 4096-9216 Bytes frames received */
+	uint64_t rx_9217b_16383b_frames;
+	/* Total Number of 9217-16383 Bytes frames received */
+	uint64_t rx_total_frames;
+	/* Total number of frames received */
+	uint64_t rx_ucast_frames;
+	/* Total number of unicast frames received */
+	uint64_t rx_mcast_frames;
+	/* Total number of multicast frames received */
+	uint64_t rx_bcast_frames;
+	/* Total number of broadcast frames received */
+	uint64_t rx_fcs_err_frames;
+	/* Total number of received frames with FCS error */
+	uint64_t rx_ctrl_frames;
+	/* Total number of control frames received */
+	uint64_t rx_pause_frames;
+	/* Total number of PAUSE frames received */
+	uint64_t rx_pfc_frames;
+	/* Total number of PFC frames received */
+	uint64_t rx_unsupported_opcode_frames;
+	/* Total number of frames received with an unsupported opcode */
+	uint64_t rx_unsupported_da_pausepfc_frames;
+	/*
+	 * Total number of frames received with an unsupported DA for
+	 * pause and PFC
+	 */
+	uint64_t rx_wrong_sa_frames;
+	/* Total number of frames received with an unsupported SA */
+	uint64_t rx_align_err_frames;
+	/* Total number of received packets with alignment error */
+	uint64_t rx_oor_len_frames;
+	/* Total number of received frames with out-of-range length */
+	uint64_t rx_code_err_frames;
+	/* Total number of received frames with error termination */
+	uint64_t rx_false_carrier_frames;
+	/*
+	 * Total number of received frames with a false carrier detected
+	 * during idle, as defined by RX_ER samples active and
+	 * RXD is 0xE. The event is reported along with the statistics
+	 * generated on the next received frame. Only one false carrier
+	 * condition can be detected and logged between frames. Carrier
+	 * event, valid for 10M/100M speed modes only.
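The format comment above pins the completed TX layout at 408 bytes, which is 51 packed 64-bit counters. A compile-time guard, as a sketch (C11 _Static_assert):

/* Sketch: 51 x 8 bytes == 408, matching the stated HWRM layout. */
_Static_assert(sizeof(struct tx_port_stats) == 408,
               "tx_port_stats must match the 408-byte HWRM layout");
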
+ */ + uint64_t rx_ovrsz_frames; + /* Total number of over-sized frames received */ + uint64_t rx_jbr_frames; + /* Total number of jabber packets received */ + uint64_t rx_mtu_err_frames; + /* Total number of received frames with MTU error */ + uint64_t rx_match_crc_frames; + /* Total number of received frames with CRC match */ + uint64_t rx_promiscuous_frames; + /* Total number of frames received promiscuously */ + uint64_t rx_tagged_frames; + /* Total number of received frames with one or two VLAN tags */ + uint64_t rx_double_tagged_frames; + /* Total number of received frames with two VLAN tags */ + uint64_t rx_trunc_frames; + /* Total number of truncated frames received */ + uint64_t rx_good_frames; + /* Total number of good frames (without errors) received */ + uint64_t rx_pfc_xon2xoff_frames_pri0; + /* + * Total number of received PFC frames with transition from XON + * to XOFF on Pri 0 + */ + uint64_t rx_pfc_xon2xoff_frames_pri1; + /* + * Total number of received PFC frames with transition from XON + * to XOFF on Pri 1 + */ + uint64_t rx_pfc_xon2xoff_frames_pri2; + /* + * Total number of received PFC frames with transition from XON + * to XOFF on Pri 2 + */ + uint64_t rx_pfc_xon2xoff_frames_pri3; + /* + * Total number of received PFC frames with transition from XON + * to XOFF on Pri 3 + */ + uint64_t rx_pfc_xon2xoff_frames_pri4; + /* + * Total number of received PFC frames with transition from XON + * to XOFF on Pri 4 + */ + uint64_t rx_pfc_xon2xoff_frames_pri5; + /* + * Total number of received PFC frames with transition from XON + * to XOFF on Pri 5 + */ + uint64_t rx_pfc_xon2xoff_frames_pri6; + /* + * Total number of received PFC frames with transition from XON + * to XOFF on Pri 6 + */ + uint64_t rx_pfc_xon2xoff_frames_pri7; + /* + * Total number of received PFC frames with transition from XON + * to XOFF on Pri 7 + */ + uint64_t rx_pfc_ena_frames_pri0; + /* + * Total number of received PFC frames with PFC enabled bit for + * Pri 0 + */ + uint64_t rx_pfc_ena_frames_pri1; + /* + * Total number of received PFC frames with PFC enabled bit for + * Pri 1 + */ + uint64_t rx_pfc_ena_frames_pri2; + /* + * Total number of received PFC frames with PFC enabled bit for + * Pri 2 + */ + uint64_t rx_pfc_ena_frames_pri3; + /* + * Total number of received PFC frames with PFC enabled bit for + * Pri 3 + */ + uint64_t rx_pfc_ena_frames_pri4; + /* + * Total number of received PFC frames with PFC enabled bit for + * Pri 4 + */ + uint64_t rx_pfc_ena_frames_pri5; + /* + * Total number of received PFC frames with PFC enabled bit for + * Pri 5 + */ + uint64_t rx_pfc_ena_frames_pri6; + /* + * Total number of received PFC frames with PFC enabled bit for + * Pri 6 + */ + uint64_t rx_pfc_ena_frames_pri7; + /* + * Total number of received PFC frames with PFC enabled bit for + * Pri 7 + */ + uint64_t rx_sch_crc_err_frames; + /* Total Number of frames received with SCH CRC error */ + uint64_t rx_undrsz_frames; + /* Total Number of under-sized frames received */ + uint64_t rx_frag_frames; + /* Total Number of fragmented frames received */ + uint64_t rx_eee_lpi_events; + /* Total number of RX EEE LPI Events */ + uint64_t rx_eee_lpi_duration; + /* EEE LPI Duration Counter on RX */ + uint64_t rx_llfc_physical_msgs; + /* + * Total number of physical type Link Level Flow Control (LLFC) + * messages received + */ + uint64_t rx_llfc_logical_msgs; + /* + * Total number of logical type Link Level Flow Control (LLFC) + * messages received + */ + uint64_t rx_llfc_msgs_with_crc_err; + /* + * Total number of logical type 
Link Level Flow Control (LLFC) + * messages received with CRC error + */ + uint64_t rx_hcfc_msgs; + /* Total number of HCFC messages received */ + uint64_t rx_hcfc_msgs_with_crc_err; + /* Total number of HCFC messages received with CRC error */ + uint64_t rx_bytes; + /* Total number of received bytes */ + uint64_t rx_runt_bytes; + /* Total number of bytes received in runt frames */ + uint64_t rx_runt_frames; + /* Total number of runt frames received */ + uint64_t rx_stat_discard; + /* Total Rx Discards per Port reported by STATS block */ + uint64_t rx_stat_err; + /* Total Rx Error Drops per Port reported by STATS block */ +} __attribute__((packed)); + +/* Periodic Statistics Context DMA to host (160 bytes) */ +/* + * per-context HW statistics -- chip view + */ + +struct ctx_hw_stats64 { + uint64_t rx_ucast_pkts; + uint64_t rx_mcast_pkts; + uint64_t rx_bcast_pkts; + uint64_t rx_drop_pkts; + uint64_t rx_discard_pkts; + uint64_t rx_ucast_bytes; + uint64_t rx_mcast_bytes; + uint64_t rx_bcast_bytes; + + uint64_t tx_ucast_pkts; + uint64_t tx_mcast_pkts; + uint64_t tx_bcast_pkts; + uint64_t tx_drop_pkts; + uint64_t tx_discard_pkts; + uint64_t tx_ucast_bytes; + uint64_t tx_mcast_bytes; + uint64_t tx_bcast_bytes; + + uint64_t tpa_pkts; + uint64_t tpa_bytes; + uint64_t tpa_events; + uint64_t tpa_aborts; +} __attribute__((packed)); + +#endif /* _HSI_STRUCT_DEF_DPDK_ */ diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c new file mode 100644 index 00000000..c343d903 --- /dev/null +++ b/drivers/net/bnxt/rte_pmd_bnxt.c @@ -0,0 +1,843 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2017 Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_byteorder.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "rte_pmd_bnxt.h"
+#include "hsi_struct_def_dpdk.h"
+
+int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
+{
+	struct rte_pmd_bnxt_mb_event_param ret_param;
+
+	ret_param.retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;
+	ret_param.vf_id = vf_id;
+	ret_param.msg = msg;
+
+	_rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
+			NULL, &ret_param);
+
+	/* Default to approve */
+	if (ret_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
+		ret_param.retval = RTE_PMD_BNXT_MB_EVENT_NOOP_ACK;
+
+	return ret_param.retval == RTE_PMD_BNXT_MB_EVENT_NOOP_ACK;
+}
+
+int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on)
+{
+	struct rte_eth_dev *eth_dev;
+	struct bnxt *bp;
+	int rc;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1)
+		return -EINVAL;
+
+	eth_dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(eth_dev))
+		return -ENOTSUP;
+
+	bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to set Tx loopback on non-PF port %d!\n",
+			port);
+		return -ENOTSUP;
+	}
+
+	if (on)
+		bp->pf.evb_mode = BNXT_EVB_MODE_VEB;
+	else
+		bp->pf.evb_mode = BNXT_EVB_MODE_VEPA;
+
+	rc = bnxt_hwrm_pf_evb_mode(bp);
+
+	return rc;
+}
+
+static void
+rte_pmd_bnxt_set_all_queues_drop_en_cb(struct bnxt_vnic_info *vnic, void *onptr)
+{
+	uint8_t *on = onptr;
+	vnic->bd_stall = !(*on);
+}
+
+int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+	struct rte_eth_dev *eth_dev;
+	struct bnxt *bp;
+	uint32_t i;
+	int rc = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1)
+		return -EINVAL;
+
+	eth_dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(eth_dev))
+		return -ENOTSUP;
+
+	bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to set all queues drop on non-PF port %d!\n",
+			port);
+		return -ENOTSUP;
+	}
+
+	if (bp->vnic_info == NULL)
+		return -ENODEV;
+
+	/* Stall PF */
+	for (i = 0; i < bp->nr_vnics; i++) {
+		bp->vnic_info[i].bd_stall = !on;
+		rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
+		if (rc) {
+			RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
+			return rc;
+		}
+	}
+
+	/* Stall all active VFs */
+	for (i = 0; i < bp->pf.active_vfs; i++) {
+		rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, i,
+				rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
+				bnxt_hwrm_vnic_cfg);
+		if (rc) {
+			RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+				struct ether_addr *mac_addr)
+{
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+	struct bnxt *bp;
+	int rc;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (vf >= dev_info.max_vfs || mac_addr == NULL)
+		return -EINVAL;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to set VF %d mac address on non-PF port %d!\n",
+			vf, port);
+		return -ENOTSUP;
+	}
+
+	rc = bnxt_hwrm_func_vf_mac(bp, vf, (uint8_t *)mac_addr);
+
+	return rc;
+}
+
+int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+			uint16_t tx_rate, uint64_t q_msk)
+{
+	struct rte_eth_dev *eth_dev;
+	struct rte_eth_dev_info dev_info;
+	struct bnxt *bp;
+	uint16_t tot_rate = 0;
+	uint64_t idx;
+	int rc;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	eth_dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(eth_dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	if (!bp->pf.active_vfs)
+		return -EINVAL;
+
+	if (vf >= bp->pf.max_vfs)
+		return -EINVAL;
+
+	/* Add up the per queue BW and configure MAX BW of the VF */
+	for (idx = 0; idx < 64; idx++) {
+		if ((1ULL << idx) & q_msk)
+			tot_rate += tx_rate;
+	}
+
+	/* Requested BW can't be greater than link speed */
+	if (tot_rate > eth_dev->data->dev_link.link_speed) {
+		RTE_LOG(ERR, PMD, "Requested rate %u exceeds link speed\n",
+			tot_rate);
+		return -EINVAL;
+	}
+
+	/* Requested BW already configured */
+	if (tot_rate == bp->pf.vf_info[vf].max_tx_rate)
+		return 0;
+
+	rc = bnxt_hwrm_func_bw_cfg(bp, vf, tot_rate,
+				HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);
+
+	if (!rc)
+		bp->pf.vf_info[vf].max_tx_rate = tot_rate;
+
+	return rc;
+}
+
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_dev *dev;
+	uint32_t func_flags;
+	struct bnxt *bp;
+	int rc;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1)
+		return -EINVAL;
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to set mac spoof on non-PF port %d!\n", port);
+		return -EINVAL;
+	}
+
+	if (vf >= dev_info.max_vfs)
+		return -EINVAL;
+
+	/* Prev setting same as new setting.
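+	 * Nothing to change, so the HWRM func_cfg call is skipped.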
+	 */
+	if (on == bp->pf.vf_info[vf].mac_spoof_en)
+		return 0;
+
+	func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+	func_flags &= ~(HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE |
+			HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE);
+
+	if (on)
+		func_flags |=
+			HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+	else
+		func_flags |=
+			HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+
+	rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
+	if (!rc) {
+		bp->pf.vf_info[vf].mac_spoof_en = on;
+		bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+	}
+
+	return rc;
+}
+
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_dev *dev;
+	struct bnxt *bp;
+	int rc;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1)
+		return -EINVAL;
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to set VLAN spoof on non-PF port %d!\n", port);
+		return -EINVAL;
+	}
+
+	if (vf >= dev_info.max_vfs)
+		return -EINVAL;
+
+	if (on == bp->pf.vf_info[vf].vlan_spoof_en)
+		return 0;
+
+	rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
+	if (!rc) {
+		bp->pf.vf_info[vf].vlan_spoof_en = on;
+		if (on) {
+			if (bnxt_hwrm_cfa_vlan_antispoof_cfg(bp,
+				bp->pf.first_vf_id + vf,
+				bp->pf.vf_info[vf].vlan_count,
+				bp->pf.vf_info[vf].vlan_as_table))
+				rc = -1;
+		}
+	} else {
+		RTE_LOG(ERR, PMD,
+			"Failed to update VLAN anti-spoof for VF %d.\n", vf);
+	}
+
+	return rc;
+}
+
+static void
+rte_pmd_bnxt_set_vf_vlan_stripq_cb(struct bnxt_vnic_info *vnic, void *onptr)
+{
+	uint8_t *on = onptr;
+	vnic->vlan_strip = *on;
+}
+
+int
+rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+	struct bnxt *bp;
+	int rc;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (vf >= dev_info.max_vfs)
+		return -EINVAL;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to set VF %d stripq on non-PF port %d!\n",
+			vf, port);
+		return -ENOTSUP;
+	}
+
+	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+				rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
+				bnxt_hwrm_vnic_cfg);
+	if (rc)
+		RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+
+	return rc;
+}
+
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+				uint16_t rx_mask, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+	uint16_t flag = 0;
+	struct bnxt *bp;
+	int rc;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (!bp->pf.vf_info)
+		return -EINVAL;
+
+	if (vf >= bp->pdev->max_vfs)
+		return -EINVAL;
+
+	if (rx_mask & (ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_HASH_MC)) {
+		RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+		return -ENOTSUP;
+	}
+
+	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC && !on) {
+		RTE_LOG(ERR, PMD, "Currently cannot disable UC Rx\n");
+		return -ENOTSUP;
+	}
+
+	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+		flag |= BNXT_VNIC_INFO_BCAST;
+	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+		flag |= BNXT_VNIC_INFO_ALLMULTI;
+
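+	/*
+	 * Fold the requested flags into the per-VF mask kept in vf_info,
+	 * then program every VNIC owned by the VF with the merged mask.
+	 */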
+	if (on)
+		bp->pf.vf_info[vf].l2_rx_mask |= flag;
+	else
+		bp->pf.vf_info[vf].l2_rx_mask &= ~flag;
+
+	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+					vf_vnic_set_rxmask_cb,
+					&bp->pf.vf_info[vf].l2_rx_mask,
+					bnxt_set_rx_mask_no_vlan);
+	if (rc)
+		RTE_LOG(ERR, PMD, "Failed to update Rx mask of VF %d\n", vf);
+
+	return rc;
+}
+
+static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
+{
+	int rc = 0;
+	int dflt_vnic;
+	struct bnxt_vnic_info vnic;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to set VLAN table on non-PF port!\n");
+		return -EINVAL;
+	}
+
+	if (vf >= bp->pdev->max_vfs)
+		return -EINVAL;
+
+	dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+	if (dflt_vnic < 0) {
+		/* This simply indicates there's no driver loaded on the VF.
+		 * It is not treated as an error.
+		 */
+		RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
+	} else {
+		memset(&vnic, 0, sizeof(vnic));
+		vnic.fw_vnic_id = dflt_vnic;
+		if (bnxt_hwrm_vnic_qcfg(bp, &vnic,
+					bp->pf.first_vf_id + vf) == 0) {
+			if (bnxt_hwrm_cfa_l2_set_rx_mask(bp, &vnic,
+					bp->pf.vf_info[vf].vlan_count,
+					bp->pf.vf_info[vf].vlan_table))
+				rc = -1;
+		} else {
+			rc = -1;
+		}
+	}
+
+	return rc;
+}
+
+int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+				    uint64_t vf_mask, uint8_t vlan_on)
+{
+	struct bnxt_vlan_table_entry *ve;
+	struct bnxt_vlan_antispoof_table_entry *vase;
+	struct rte_eth_dev *dev;
+	struct bnxt *bp;
+	uint16_t cnt;
+	int rc = 0;
+	int i, j;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	bp = (struct bnxt *)dev->data->dev_private;
+	if (!bp->pf.vf_info)
+		return -EINVAL;
+
+	for (i = 0; vf_mask; i++, vf_mask >>= 1) {
+		cnt = bp->pf.vf_info[i].vlan_count;
+		if ((vf_mask & 1) == 0)
+			continue;
+
+		if (bp->pf.vf_info[i].vlan_table == NULL) {
+			rc = -1;
+			continue;
+		}
+		if (bp->pf.vf_info[i].vlan_as_table == NULL) {
+			rc = -1;
+			continue;
+		}
+		if (vlan_on) {
+			/* First, search for a duplicate...
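+			 * entry so the same VID is never added twice.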
+			 */
+			for (j = 0; j < cnt; j++) {
+				if (rte_be_to_cpu_16(
+				   bp->pf.vf_info[i].vlan_table[j].vid) == vlan)
+					break;
+			}
+			if (j == cnt) {
+				/* Now check that there's space */
+				if (cnt == getpagesize() / sizeof(struct
+				    bnxt_vlan_antispoof_table_entry)) {
+					RTE_LOG(ERR, PMD,
+					      "VLAN anti-spoof table is full\n");
+					RTE_LOG(ERR, PMD,
+						"VF %d cannot add VLAN %u\n",
+						i, vlan);
+					rc = -1;
+					continue;
+				}
+
+				/* cnt is one less than vlan_count */
+				cnt = bp->pf.vf_info[i].vlan_count++;
+				/*
+				 * And finally, add to the
+				 * end of the table
+				 */
+				vase = &bp->pf.vf_info[i].vlan_as_table[cnt];
+				/* TODO: Hardcoded TPID */
+				vase->tpid = rte_cpu_to_be_16(0x8100);
+				vase->vid = rte_cpu_to_be_16(vlan);
+				vase->mask = rte_cpu_to_be_16(0xfff);
+				ve = &bp->pf.vf_info[i].vlan_table[cnt];
+				/* TODO: Hardcoded TPID */
+				ve->tpid = rte_cpu_to_be_16(0x8100);
+				ve->vid = rte_cpu_to_be_16(vlan);
+			}
+		} else {
+			for (j = 0; j < cnt; j++) {
+				if (rte_be_to_cpu_16(
+				   bp->pf.vf_info[i].vlan_table[j].vid) != vlan)
+					continue;
+				memmove(&bp->pf.vf_info[i].vlan_table[j],
+					&bp->pf.vf_info[i].vlan_table[j + 1],
+					getpagesize() - ((j + 1) *
+					sizeof(struct bnxt_vlan_table_entry)));
+				memmove(&bp->pf.vf_info[i].vlan_as_table[j],
+					&bp->pf.vf_info[i].vlan_as_table[j + 1],
+					getpagesize() - ((j + 1) * sizeof(struct
+					bnxt_vlan_antispoof_table_entry)));
+				j--;
+				cnt = --bp->pf.vf_info[i].vlan_count;
+			}
+		}
+		bnxt_set_vf_table(bp, i);
+	}
+
+	return rc;
+}
+
+int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+			      uint16_t vf_id,
+			      struct rte_eth_stats *stats)
+{
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+	struct bnxt *bp;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (vf_id >= dev_info.max_vfs)
+		return -EINVAL;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to get VF %d stats on non-PF port %d!\n",
+			vf_id, port);
+		return -ENOTSUP;
+	}
+
+	return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats);
+}
+
+int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+				uint16_t vf_id)
+{
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+	struct bnxt *bp;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (vf_id >= dev_info.max_vfs)
+		return -EINVAL;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to reset VF %d stats on non-PF port %d!\n",
+			vf_id, port);
+		return -ENOTSUP;
+	}
+
+	return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
+}
+
+int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id)
+{
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+	struct bnxt *bp;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (vf_id >= dev_info.max_vfs)
+		return -EINVAL;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to query VF %d RX stats on non-PF port %d!\n",
+			vf_id, port);
+		return -ENOTSUP;
+	}
+
+	return bnxt_vf_vnic_count(bp, vf_id);
+}
+
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+				      uint64_t *count)
+{
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+	struct bnxt *bp;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt
*)dev->data->dev_private; + + if (vf_id >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + RTE_LOG(ERR, PMD, + "Attempt to query VF %d TX drops on non-PF port %d!\n", + vf_id, port); + return -ENOTSUP; + } + + return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf.first_vf_id + vf_id, + count); +} + +int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr, + uint32_t vf_id) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + struct bnxt_filter_info *filter; + struct bnxt_vnic_info vnic; + struct ether_addr dflt_mac; + int rc; + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rte_eth_dev_info_get(port, &dev_info); + bp = (struct bnxt *)dev->data->dev_private; + + if (vf_id >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + RTE_LOG(ERR, PMD, + "Attempt to config VF %d MAC on non-PF port %d!\n", + vf_id, port); + return -ENOTSUP; + } + + /* If the VF currently uses a random MAC, update default to this one */ + if (bp->pf.vf_info[vf_id].random_mac) { + if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0) + rc = bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr); + } + + /* query the default VNIC id used by the function */ + rc = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf_id); + if (rc < 0) + goto exit; + + memset(&vnic, 0, sizeof(struct bnxt_vnic_info)); + vnic.fw_vnic_id = rte_le_to_cpu_16(rc); + rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf_id); + if (rc < 0) + goto exit; + + STAILQ_FOREACH(filter, &bp->pf.vf_info[vf_id].filter, next) { + if (filter->flags == + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX && + filter->enables == + (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) && + memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) { + bnxt_hwrm_clear_filter(bp, filter); + break; + } + } + + if (filter == NULL) + filter = bnxt_alloc_vf_filter(bp, vf_id); + + filter->fw_l2_filter_id = UINT64_MAX; + filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; + memcpy(filter->l2_addr, addr, ETHER_ADDR_LEN); + memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN); + + /* Do not add a filter for the default MAC */ + if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) || + memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN)) + rc = bnxt_hwrm_set_filter(bp, vnic.fw_vnic_id, filter); + +exit: + return rc; +} + +int +rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf, + uint16_t vlan_id) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rte_eth_dev_info_get(port, &dev_info); + bp = (struct bnxt *)dev->data->dev_private; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + RTE_LOG(ERR, PMD, + "Attempt to set VF %d vlan insert on non-PF port %d!\n", + vf, port); + return -ENOTSUP; + } + + bp->pf.vf_info[vf].dflt_vlan = vlan_id; + if (bnxt_hwrm_func_qcfg_current_vf_vlan(bp, vf) == + bp->pf.vf_info[vf].dflt_vlan) + return 0; + + rc = bnxt_hwrm_set_vf_vlan(bp, vf); + + return rc; +} + +int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev_info dev_info; + struct rte_eth_dev *dev; + uint32_t func_flags; + struct bnxt *bp; + int rc; + + 
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1)
+		return -EINVAL;
+
+	dev = &rte_eth_devices[port];
+	if (!is_bnxt_supported(dev))
+		return -ENOTSUP;
+
+	rte_eth_dev_info_get(port, &dev_info);
+	bp = (struct bnxt *)dev->data->dev_private;
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD,
+			"Attempt to set persist stats on non-PF port %d!\n",
+			port);
+		return -EINVAL;
+	}
+
+	if (vf >= dev_info.max_vfs)
+		return -EINVAL;
+
+	/* Prev setting same as new setting. */
+	if (on == bp->pf.vf_info[vf].persist_stats)
+		return 0;
+
+	func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+
+	if (on)
+		func_flags |=
+			HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC;
+	else
+		func_flags &=
+			~HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC;
+
+	rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
+	if (!rc) {
+		bp->pf.vf_info[vf].persist_stats = on;
+		bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+	}
+
+	return rc;
+}
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
new file mode 100644
index 00000000..c4c4770e
--- /dev/null
+++ b/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -0,0 +1,354 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PMD_BNXT_H_
+#define _PMD_BNXT_H_
+
+#include <rte_ethdev.h>
+
+/*
+ * Response sent back to the caller after callback
+ */
+enum rte_pmd_bnxt_mb_event_rsp {
+	RTE_PMD_BNXT_MB_EVENT_NOOP_ACK,	/**< skip mbox request and ACK */
+	RTE_PMD_BNXT_MB_EVENT_NOOP_NACK,	/**< skip mbox request and NACK */
+	RTE_PMD_BNXT_MB_EVENT_PROCEED,	/**< proceed with mbox request */
+	RTE_PMD_BNXT_MB_EVENT_MAX	/**< max value of this enum */
+};
+
+/* mailbox message types */
+#define BNXT_VF_RESET		0x01 /* VF requests reset */
+#define BNXT_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
+#define BNXT_VF_SET_VLAN	0x03 /* VF requests PF to set VLAN */
+#define BNXT_VF_SET_MTU		0x04 /* VF requests PF to set MTU */
+#define BNXT_VF_SET_MRU		0x05 /* VF requests PF to set MRU */
+
+/*
+ * Data sent to the caller when the callback is executed.
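+ * The callback may overwrite retval with one of the
+ * rte_pmd_bnxt_mb_event_rsp codes above to NACK or skip the request.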
+ */ +struct rte_pmd_bnxt_mb_event_param { + uint16_t vf_id; /* Virtual Function number */ + int retval; /* return value */ + void *msg; /* pointer to message */ +}; + +/** + * Enable/Disable VF MAC anti spoof + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param on + * 1 - Enable VF MAC anti spoof. + * 0 - Disable VF MAC anti spoof. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on); + +/** + * Set the VF MAC address. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf, + struct ether_addr *mac_addr); + +/** + * Enable/Disable vf vlan strip for all queues in a pool + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * ID specifying VF. + * @param on + * 1 - Enable VF's vlan strip on RX queues. + * 0 - Disable VF's vlan strip on RX queues. + * + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int +rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on); + +/** + * Enable/Disable vf vlan insert + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * ID specifying VF. + * @param vlan_id + * 0 - Disable VF's vlan insert. + * n - Enable; n is inserted as the vlan id. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int +rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf, + uint16_t vlan_id); + +/** + * Enable/Disable hardware VF VLAN filtering by an Ethernet device of + * received VLAN packets tagged with a given VLAN Tag Identifier. + * + * @param port + * The port identifier of the Ethernet device. + * @param vlan + * The VLAN Tag Identifier whose filtering must be enabled or disabled. + * @param vf_mask + * Bitmap listing which VFs participate in the VLAN filtering. + * @param vlan_on + * 1 - Enable VFs VLAN filtering. + * 0 - Disable VFs VLAN filtering. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port_id* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on); + +/** + * Enable/Disable tx loopback + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - Enable tx loopback. + * 0 - Disable tx loopback. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on); + +/** + * set all queues drop enable bit + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - set the queue drop enable bit for all pools. + * 0 - reset the queue drop enable bit for all pools. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on); + +/** + * Set the VF rate limit. + * + * @param port + * The port identifier of the Ethernet device. 
+ * @param vf
+ *   VF id.
+ * @param tx_rate
+ *   Tx rate for the VF
+ * @param q_msk
+ *   Bitmask of the Tx queues to which *tx_rate* applies
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+				uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param vf_id
+ *   VF id.
+ * @param stats
+ *   A pointer to a structure of type *rte_eth_stats* to be filled with
+ *   the values of the VF's supported statistics.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+			      uint16_t vf_id,
+			      struct rte_eth_stats *stats);
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param vf_id
+ *   VF id.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+				uint16_t vf_id);
+
+/**
+ * Enable/Disable VF VLAN anti spoof
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param vf
+ *   VF id.
+ * @param on
+ *   1 - Enable VF VLAN anti spoof.
+ *   0 - Disable VF VLAN anti spoof.
+ *
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Set RX L2 Filtering mode of a VF of an Ethernet device.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param vf
+ *   VF id.
+ * @param rx_mask
+ *   The RX mode mask
+ * @param on
+ *   1 - Enable a VF RX mode.
+ *   0 - Disable a VF RX mode.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+				uint16_t rx_mask, uint8_t on);
+
+/**
+ * Returns the number of default RX queues on a VF
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param vf_id
+ *   VF id.
+ * @return
+ *   - Non-negative value - Number of default RX queues
+ *   - (-EINVAL) if bad parameter.
+ *   - (-ENOTSUP) if on a function without VFs
+ *   - (-ENOMEM) on an allocation failure
+ *   - (-1) firmware interface error
+ */
+int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id);
+
+/**
+ * Queries the TX drop counter for the function
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param vf_id
+ *   VF id.
+ * @param count
+ *   Pointer to a uint64_t that will be populated with the counter value.
+ * @return
+ *   - Positive Non-zero value - Error code from HWRM
+ *   - (-EINVAL) invalid vf_id specified.
+ *   - (-ENOTSUP) Ethernet device is not a PF
+ */
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+				      uint64_t *count);
+
+/**
+ * Programs the MAC address for the function specified
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac_addr
+ *   The MAC address to be programmed in the filter.
+ * @param vf_id
+ *   VF on which to program the MAC address.
+ * @return
+ *   - Positive Non-zero value - Error code from HWRM
+ *   - (-EINVAL) invalid vf_id specified.
+ *   - (-ENOTSUP) Ethernet device is not a PF
+ *   - (-ENOMEM) on an allocation failure
+ */
+int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+				uint32_t vf_id);
+
+/**
+ * Enable/Disable VF statistics retention
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param vf
+ *   VF id.
+ * @param on
+ *   1 - Prevent VF statistics from automatically resetting
+ *   0 - Allow VF statistics to automatically reset
+ *
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on);
+#endif /* _PMD_BNXT_H_ */
diff --git a/drivers/net/bnxt/rte_pmd_bnxt_version.map b/drivers/net/bnxt/rte_pmd_bnxt_version.map
index 349c6e1c..4750d40a 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt_version.map
+++ b/drivers/net/bnxt/rte_pmd_bnxt_version.map
@@ -1,4 +1,22 @@
-DPDK_16.04 {
+DPDK_17.08 {
+	global:
+
+	rte_pmd_bnxt_get_vf_rx_status;
+	rte_pmd_bnxt_get_vf_stats;
+	rte_pmd_bnxt_get_vf_tx_drop_count;
+	rte_pmd_bnxt_mac_addr_add;
+	rte_pmd_bnxt_reset_vf_stats;
+	rte_pmd_bnxt_set_all_queues_drop_en;
+	rte_pmd_bnxt_set_tx_loopback;
+	rte_pmd_bnxt_set_vf_mac_addr;
+	rte_pmd_bnxt_set_vf_mac_anti_spoof;
+	rte_pmd_bnxt_set_vf_rate_limit;
+	rte_pmd_bnxt_set_vf_rxmode;
+	rte_pmd_bnxt_set_vf_vlan_anti_spoof;
+	rte_pmd_bnxt_set_vf_vlan_filter;
+	rte_pmd_bnxt_set_vf_vlan_insert;
+	rte_pmd_bnxt_set_vf_vlan_stripq;
+	rte_pmd_bnxt_set_vf_persist_stats;

	local: *;
};
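For reference, a minimal sketch (not part of the patch) of how an application might drive the per-VF API exported above. The port id, VF id, MAC value, VLAN id, and rate are placeholders, error handling is abbreviated, and it assumes the port is a bnxt PF with SR-IOV VFs enabled:

#include <rte_ethdev.h>
#include "rte_pmd_bnxt.h"

/* Placeholder example: lock down VF 0 behind PF port 0 */
static int example_configure_vf(void)
{
	uint8_t port = 0;	/* assumed bnxt PF port */
	uint16_t vf = 0;	/* assumed VF id */
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};
	int rc;

	/* Pin the VF's MAC and drop frames with any other source MAC */
	rc = rte_pmd_bnxt_set_vf_mac_addr(port, vf, &mac);
	if (rc)
		return rc;
	rc = rte_pmd_bnxt_set_vf_mac_anti_spoof(port, vf, 1);
	if (rc)
		return rc;

	/* Permit only VLAN 100 on this VF and enforce it with anti-spoof */
	rc = rte_pmd_bnxt_set_vf_vlan_filter(port, 100, 1ULL << vf, 1);
	if (rc)
		return rc;
	rc = rte_pmd_bnxt_set_vf_vlan_anti_spoof(port, vf, 1);
	if (rc)
		return rc;

	/* Cap the VF at 1000 Mbps on queue 0 */
	return rte_pmd_bnxt_set_vf_rate_limit(port, vf, 1000, 0x1);
}

Each call returns 0 on success or a negative errno as documented in rte_pmd_bnxt.h above.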