Diffstat (limited to 'drivers/net/ena/ena_ethdev.c')
-rw-r--r--	drivers/net/ena/ena_ethdev.c	800
1 file changed, 596 insertions(+), 204 deletions(-)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 34b2a8d7..c255dc6d 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -54,7 +54,7 @@
 #include <ena_eth_io_defs.h>
 
 #define DRV_MODULE_VER_MAJOR	1
-#define DRV_MODULE_VER_MINOR	0
+#define DRV_MODULE_VER_MINOR	1
 #define DRV_MODULE_VER_SUBMINOR	0
 
 #define ENA_IO_TXQ_IDX(q)	(2 * (q))
@@ -85,6 +85,9 @@
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
+#define ENA_MAX_RING_DESC	ENA_DEFAULT_RING_SIZE
+#define ENA_MIN_RING_DESC	128
+
 enum ethtool_stringset {
 	ETH_SS_TEST = 0,
 	ETH_SS_STATS,
@@ -114,6 +117,12 @@ struct ena_stats {
 #define ENA_STAT_GLOBAL_ENTRY(stat) \
 	ENA_STAT_ENTRY(stat, dev)
 
+/*
+ * Each rte_memzone should have unique name.
+ * To satisfy it, count number of allocation and add it to name.
+ */
+uint32_t ena_alloc_cnt;
+
 static const struct ena_stats ena_stats_global_strings[] = {
 	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
 	ENA_STAT_GLOBAL_ENTRY(io_suspend),
@@ -195,8 +204,11 @@ static const struct rte_pci_id pci_id_ena_map[] = {
 	{ .device_id = 0 },
 };
 
+static struct ena_aenq_handlers aenq_handlers;
+
 static int ena_device_init(struct ena_com_dev *ena_dev,
-			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
+			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
+			   bool *wd_state);
 static int ena_dev_configure(struct rte_eth_dev *dev);
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
@@ -215,7 +227,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
 static void ena_init_rings(struct ena_adapter *adapter);
 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int ena_start(struct rte_eth_dev *dev);
+static void ena_stop(struct rte_eth_dev *dev);
 static void ena_close(struct rte_eth_dev *dev);
+static int ena_dev_reset(struct rte_eth_dev *dev);
 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
 static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
 static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
@@ -238,10 +252,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
+static void ena_interrupt_handler_rte(void *cb_arg);
+static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -249,12 +261,14 @@ static const struct eth_dev_ops ena_dev_ops = {
 	.rx_queue_setup       = ena_rx_queue_setup,
 	.tx_queue_setup       = ena_tx_queue_setup,
 	.dev_start            = ena_start,
+	.dev_stop             = ena_stop,
 	.link_update          = ena_link_update,
 	.stats_get            = ena_stats_get,
 	.mtu_set              = ena_mtu_set,
 	.rx_queue_release     = ena_rx_queue_release,
 	.tx_queue_release     = ena_tx_queue_release,
 	.dev_close            = ena_close,
+	.dev_reset            = ena_dev_reset,
 	.reta_update          = ena_rss_reta_update,
 	.reta_query           = ena_rss_reta_query,
 };
@@ -264,11 +278,15 @@ static const struct eth_dev_ops ena_dev_ops = {
 static inline int ena_cpu_to_node(int cpu)
 {
 	struct rte_config *config = rte_eal_get_configuration();
+	struct rte_fbarray *arr = &config->mem_config->memzones;
+	const struct rte_memzone *mz;
+
+	if (unlikely(cpu >= RTE_MAX_MEMZONE))
+		return NUMA_NO_NODE;
 
-	if (likely(cpu < RTE_MAX_MEMZONE))
-		return config->mem_config->memzone[cpu].socket_id;
+	mz = rte_fbarray_get(arr, cpu);
 
-	return NUMA_NO_NODE;
+	return mz->socket_id;
 }
 
 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
@@ -346,9 +364,6 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 		ena_meta->mss = mbuf->tso_segsz;
 		ena_meta->l3_hdr_len = mbuf->l3_len;
 		ena_meta->l3_hdr_offset = mbuf->l2_len;
-		/* this param needed only for TSO */
-		ena_meta->l3_outer_hdr_len = 0;
-		ena_meta->l3_outer_hdr_offset = 0;
 
 		ena_tx_ctx->meta_valid = true;
 	} else {
@@ -356,6 +371,40 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	}
 }
 
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
+{
+	if (likely(req_id < rx_ring->ring_size))
+		return 0;
+
+	RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);
+
+	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+	rx_ring->adapter->trigger_reset = true;
+
+	return -EFAULT;
+}
+
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+	struct ena_tx_buffer *tx_info = NULL;
+
+	if (likely(req_id < tx_ring->ring_size)) {
+		tx_info = &tx_ring->tx_buffer_info[req_id];
+		if (likely(tx_info->mbuf))
+			return 0;
+	}
+
+	if (tx_info)
+		RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
+	else
+		RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
+
+	/* Trigger device reset */
+	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+	tx_ring->adapter->trigger_reset = true;
+	return -EFAULT;
+}
+
 static void ena_config_host_info(struct ena_com_dev *ena_dev)
 {
 	struct ena_admin_host_info *host_info;
@@ -387,9 +436,12 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 
 	rc = ena_com_set_host_attributes(ena_dev);
 	if (rc) {
-		RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
-		if (rc != -EPERM)
-			goto err;
+		if (rc == -ENA_COM_UNSUPPORTED)
+			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+		else
+			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+		goto err;
 	}
 
 	return;
@@ -440,9 +492,12 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
 
 	rc = ena_com_set_host_attributes(&adapter->ena_dev);
 	if (rc) {
-		RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
-		if (rc != -EPERM)
-			goto err;
+		if (rc == -ENA_COM_UNSUPPORTED)
+			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+		else
+			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+		goto err;
 	}
 
 	return;
@@ -455,12 +510,76 @@ static void ena_close(struct rte_eth_dev *dev)
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 
-	adapter->state = ENA_ADAPTER_STATE_STOPPED;
+	ena_stop(dev);
+	adapter->state = ENA_ADAPTER_STATE_CLOSED;
 
 	ena_rx_queue_release_all(dev);
 	ena_tx_queue_release_all(dev);
 }
 
+static int
+ena_dev_reset(struct rte_eth_dev *dev)
+{
+	struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
+	struct rte_eth_dev *eth_dev;
+	struct rte_pci_device *pci_dev;
+	struct rte_intr_handle *intr_handle;
+	struct ena_com_dev *ena_dev;
+	struct ena_com_dev_get_features_ctx get_feat_ctx;
+	struct ena_adapter *adapter;
+	int nb_queues;
+	int rc, i;
+	bool wd_state;
+
+	adapter = (struct ena_adapter *)(dev->data->dev_private);
+	ena_dev = &adapter->ena_dev;
+	eth_dev = adapter->rte_dev;
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	intr_handle = &pci_dev->intr_handle;
+	nb_queues = eth_dev->data->nb_rx_queues;
+
+	ena_com_set_admin_running_state(ena_dev, false);
+
+	rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+	if (rc)
+		RTE_LOG(ERR, PMD, "Device reset failed\n");
+
+	for (i = 0; i < nb_queues; i++)
+		mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
+
+	ena_rx_queue_release_all(eth_dev);
+	ena_tx_queue_release_all(eth_dev);
+
+	rte_intr_disable(intr_handle);
+
+	ena_com_abort_admin_commands(ena_dev);
+	ena_com_wait_for_abort_completion(ena_dev);
+	ena_com_admin_destroy(ena_dev);
+	ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
+	if (rc) {
+		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
+		return rc;
+	}
+	adapter->wd_state = wd_state;
+
+	rte_intr_enable(intr_handle);
+	ena_com_set_admin_polling_mode(ena_dev, false);
+	ena_com_admin_aenq_enable(ena_dev);
+
+	for (i = 0; i < nb_queues; ++i)
+		ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL,
+			mb_pool_rx[i]);
+
+	for (i = 0; i < nb_queues; ++i)
+		ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL);
+
+	adapter->trigger_reset = false;
+
+	return 0;
+}
+
 static int ena_rss_reta_update(struct rte_eth_dev *dev,
 			       struct rte_eth_rss_reta_entry64 *reta_conf,
 			       uint16_t reta_size)
@@ -468,7 +587,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int ret, i;
+	int rc, i;
 	u16 entry_value;
 	int conf_idx;
 	int idx;
@@ -480,8 +599,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 		RTE_LOG(WARNING, PMD,
 			"indirection table %d is bigger than supported (%d)\n",
 			reta_size, ENA_RX_RSS_TABLE_SIZE);
-		ret = -EINVAL;
-		goto err;
+		return -EINVAL;
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
@@ -493,29 +611,28 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
 			entry_value =
 				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
-			ret = ena_com_indirect_table_fill_entry(ena_dev,
-								i,
-								entry_value);
-			if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+
+			rc = ena_com_indirect_table_fill_entry(ena_dev,
+							       i,
+							       entry_value);
+			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 				RTE_LOG(ERR, PMD,
 					"Cannot fill indirect table\n");
-				ret = -ENOTSUP;
-				goto err;
+				return rc;
 			}
 		}
 	}
 
-	ret = ena_com_indirect_table_set(ena_dev);
-	if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+	rc = ena_com_indirect_table_set(ena_dev);
+	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
-		ret = -ENOTSUP;
-		goto err;
+		return rc;
 	}
 
 	RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
 		__func__, reta_size, adapter->rte_dev->data->port_id);
-err:
-	return ret;
+
+	return 0;
 }
 
 /* Query redirection table. */
@@ -526,7 +643,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int ret;
+	int rc;
 	int i;
 	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
 	int reta_conf_idx;
@@ -536,11 +653,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
 		return -EINVAL;
 
-	ret = ena_com_indirect_table_get(ena_dev, indirect_table);
-	if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
+	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 		RTE_LOG(ERR, PMD, "cannot get indirect table\n");
-		ret = -ENOTSUP;
-		goto err;
+		return -ENOTSUP;
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
@@ -550,8 +666,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			reta_conf[reta_conf_idx].reta[reta_idx] =
 				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
 	}
-err:
-	return ret;
+
+	return 0;
 }
 
 static int ena_rss_init_default(struct ena_adapter *adapter)
@@ -571,7 +687,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
 		val = i % nb_rx_queues;
 		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 						       ENA_IO_RXQ_IDX(val));
-		if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
 			RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
 			goto err_fill_indir;
 		}
@@ -579,19 +695,19 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
 
 	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
 					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
-	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
 		RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
 		goto err_fill_indir;
 	}
 
 	rc = ena_com_set_default_hash_ctrl(ena_dev);
-	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
 		RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
 		goto err_fill_indir;
 	}
 
 	rc = ena_com_indirect_table_set(ena_dev);
-	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
 		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
 		goto err_fill_indir;
 	}
@@ -650,6 +766,10 @@ static void ena_rx_queue_release(void *queue)
 		rte_free(ring->rx_buffer_info);
 	ring->rx_buffer_info = NULL;
 
+	if (ring->empty_rx_reqs)
+		rte_free(ring->empty_rx_reqs);
+	ring->empty_rx_reqs = NULL;
+
 	ring->configured = 0;
 
 	RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
@@ -723,9 +843,12 @@ static int ena_link_update(struct rte_eth_dev *dev,
 			   __rte_unused int wait_to_complete)
 {
 	struct rte_eth_link *link = &dev->data->dev_link;
+	struct ena_adapter *adapter;
+
+	adapter = (struct ena_adapter *)(dev->data->dev_private);
 
-	link->link_status = 1;
-	link->link_speed = ETH_SPEED_NUM_10G;
+	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link->link_speed = ETH_SPEED_NUM_NONE;
 	link->link_duplex = ETH_LINK_FULL_DUPLEX;
 
 	return 0;
@@ -737,13 +860,18 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_ring *queues = NULL;
+	int nb_queues;
 	int i = 0;
 	int rc = 0;
 
-	queues = (ring_type == ENA_RING_TYPE_RX) ?
-		adapter->rx_ring : adapter->tx_ring;
-
-	for (i = 0; i < adapter->num_queues; i++) {
+	if (ring_type == ENA_RING_TYPE_RX) {
+		queues = adapter->rx_ring;
+		nb_queues = dev->data->nb_rx_queues;
+	} else {
+		queues = adapter->tx_ring;
+		nb_queues = dev->data->nb_tx_queues;
+	}
+	for (i = 0; i < nb_queues; i++) {
 		if (queues[i].configured) {
 			if (ring_type == ENA_RING_TYPE_RX) {
 				ena_assert_msg(
@@ -761,7 +889,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
 				PMD_INIT_LOG(ERR,
 					     "failed to restart queue %d type(%d)",
 					     i, ring_type);
-				return -1;
+				return rc;
 			}
 		}
 	}
@@ -785,9 +913,11 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 {
 	uint32_t max_frame_len = ena_get_mtu_conf(adapter);
 
-	if (max_frame_len > adapter->max_mtu) {
-		PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
-		return -1;
+	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
+			"max mtu: %d, min mtu: %d\n",
+			max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+		return ENA_COM_UNSUPPORTED;
 	}
 
 	return 0;
@@ -795,6 +925,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 
 static int
 ena_calc_queue_size(struct ena_com_dev *ena_dev,
+		    u16 *max_tx_sgl_size,
 		    struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
 	uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
@@ -812,11 +943,14 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
 	if (!rte_is_power_of_2(queue_size))
 		queue_size = rte_align32pow2(queue_size >> 1);
 
-	if (queue_size == 0) {
+	if (unlikely(queue_size == 0)) {
 		PMD_INIT_LOG(ERR, "Invalid queue size");
 		return -EFAULT;
 	}
 
+	*max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+		get_feat_ctx->max_queues.max_packet_tx_descs);
+
 	return queue_size;
 }
 
@@ -881,12 +1015,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	ena_dev = &adapter->ena_dev;
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device");
 
-	if (mtu > ena_get_mtu_conf(adapter)) {
+	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
 		RTE_LOG(ERR, PMD,
-			"Given MTU (%d) exceeds maximum MTU supported (%d)\n",
-			mtu, ena_get_mtu_conf(adapter));
-		rc = -EINVAL;
-		goto err;
+			"Invalid MTU setting. new_mtu: %d "
+			"max mtu: %d min mtu: %d\n",
+			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+		return -EINVAL;
 	}
 
 	rc = ena_com_set_dev_mtu(ena_dev, mtu);
@@ -895,7 +1029,6 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	else
 		RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);
 
-err:
 	return rc;
 }
 
@@ -903,14 +1036,9 @@ static int ena_start(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
+	uint64_t ticks;
 	int rc = 0;
 
-	if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG ||
-	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
-		PMD_INIT_LOG(ERR, "API violation");
-		return -1;
-	}
-
 	rc = ena_check_valid_conf(adapter);
 	if (rc)
 		return rc;
@@ -924,7 +1052,7 @@ static int ena_start(struct rte_eth_dev *dev)
 		return rc;
 
 	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
-	    ETH_MQ_RX_RSS_FLAG) {
+	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
 		rc = ena_rss_init_default(adapter);
 		if (rc)
 			return rc;
@@ -932,11 +1060,28 @@ static int ena_start(struct rte_eth_dev *dev)
 
 	ena_stats_restart(dev);
 
+	adapter->timestamp_wd = rte_get_timer_cycles();
+	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
+
+	ticks = rte_get_timer_hz();
+	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
+		ena_timer_wd_callback, adapter);
+
 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
 
 	return 0;
 }
 
+static void ena_stop(struct rte_eth_dev *dev)
+{
+	struct ena_adapter *adapter =
+		(struct ena_adapter *)(dev->data->dev_private);
+
+	rte_timer_stop_sync(&adapter->timer_wd);
+
+	adapter->state = ENA_ADAPTER_STATE_STOPPED;
+}
+
 static int ena_queue_restart(struct ena_ring *ring)
 {
 	int rc, bufs_num;
@@ -954,7 +1099,7 @@ static int ena_queue_restart(struct ena_ring *ring)
 	rc = ena_populate_rx_queue(ring, bufs_num);
 	if (rc != bufs_num) {
 		PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
-		return (-1);
+		return ENA_COM_FAULT;
 	}
 
 	return 0;
@@ -984,12 +1129,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(CRIT, PMD,
 			"API violation. Queue %d is already configured\n",
 			queue_idx);
-		return -1;
+		return ENA_COM_FAULT;
 	}
 
 	if (!rte_is_power_of_2(nb_desc)) {
 		RTE_LOG(ERR, PMD,
-			"Unsupported size of RX queue: %d is not a power of 2.",
+			"Unsupported size of TX queue: %d is not a power of 2.",
 			nb_desc);
 		return -EINVAL;
 	}
@@ -1001,12 +1146,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
-	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1021,6 +1160,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(ERR, PMD,
 			"failed to create io TX queue #%d (qid:%d) rc: %d\n",
 			queue_idx, ena_qid, rc);
+		return rc;
 	}
 	txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
 	txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1032,10 +1172,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(ERR, PMD,
 			"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
 			queue_idx, rc);
-		ena_com_destroy_io_queue(ena_dev, ena_qid);
-		goto err;
+		goto err_destroy_io_queue;
 	}
 
+	ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node);
+
 	txq->port_id = dev->data->port_id;
 	txq->next_to_clean = 0;
 	txq->next_to_use = 0;
@@ -1047,7 +1188,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 					  RTE_CACHE_LINE_SIZE);
 	if (!txq->tx_buffer_info) {
 		RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_destroy_io_queue;
 	}
 
 	txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
@@ -1055,18 +1197,29 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 					 RTE_CACHE_LINE_SIZE);
 	if (!txq->empty_tx_reqs) {
 		RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
-		rte_free(txq->tx_buffer_info);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_free;
 	}
+
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
-	txq->offloads = tx_conf->offloads;
+	if (tx_conf != NULL) {
+		txq->offloads =
+			tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	}
 
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
 	dev->data->tx_queues[queue_idx] = txq;
-err:
+
+	return 0;
+
+err_free:
+	rte_free(txq->tx_buffer_info);
+
+err_destroy_io_queue:
+	ena_com_destroy_io_queue(ena_dev, ena_qid);
 	return rc;
 }
 
@@ -1074,7 +1227,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      const struct rte_eth_rxconf *rx_conf,
+			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1085,7 +1238,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_ring *rxq = NULL;
 	uint16_t ena_qid = 0;
-	int rc = 0;
+	int i, rc = 0;
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 
 	rxq = &adapter->rx_ring[queue_idx];
@@ -1093,12 +1246,12 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(CRIT, PMD,
 			"API violation. Queue %d is already configured\n",
 			queue_idx);
-		return -1;
+		return ENA_COM_FAULT;
 	}
 
 	if (!rte_is_power_of_2(nb_desc)) {
 		RTE_LOG(ERR, PMD,
-			"Unsupported size of TX queue: %d is not a power of 2.",
+			"Unsupported size of RX queue: %d is not a power of 2.",
 			nb_desc);
 		return -EINVAL;
 	}
@@ -1110,11 +1263,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1125,9 +1273,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 	ctx.numa_node = ena_cpu_to_node(queue_idx);
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
-	if (rc)
+	if (rc) {
 		RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
 			queue_idx, rc);
+		return rc;
+	}
 
 	rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
 	rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1140,6 +1290,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
 			queue_idx, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	rxq->port_id = dev->data->port_id;
@@ -1153,9 +1304,24 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 					  RTE_CACHE_LINE_SIZE);
 	if (!rxq->rx_buffer_info) {
 		RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
 		return -ENOMEM;
 	}
 
+	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
+					 sizeof(uint16_t) * nb_desc,
+					 RTE_CACHE_LINE_SIZE);
+	if (!rxq->empty_rx_reqs) {
+		RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
+		rte_free(rxq->rx_buffer_info);
+		rxq->rx_buffer_info = NULL;
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nb_desc; i++)
+		rxq->empty_tx_reqs[i] = i;
+
 	/* Store pointer to this queue in upper layer */
 	rxq->configured = 1;
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1170,7 +1336,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	uint16_t ring_size = rxq->ring_size;
 	uint16_t ring_mask = ring_size - 1;
 	uint16_t next_to_use = rxq->next_to_use;
-	uint16_t in_use;
+	uint16_t in_use, req_id;
 	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
 
 	if (unlikely(!count))
@@ -1198,12 +1364,18 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		struct ena_com_buf ebuf;
 
 		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+
+		req_id = rxq->empty_rx_reqs[next_to_use_masked];
+		rc = validate_rx_req_id(rxq, req_id);
+		if (unlikely(rc < 0))
+			break;
+
 		/* prepare physical address for DMA transaction */
 		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
 		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
 		/* pass resource to device */
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
-						&ebuf, next_to_use_masked);
+						&ebuf, req_id);
 		if (unlikely(rc)) {
 			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
 					     count - i);
@@ -1213,9 +1385,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		next_to_use++;
 	}
 
+	if (unlikely(i < count))
+		RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
+			"buffers (from %d)\n", rxq->id, i, count);
+
 	/* When we submitted free recources to device... */
-	if (i > 0) {
-		/* ...let HW know that it can fill buffers with data */
+	if (likely(i > 0)) {
+		/* ...let HW know that it can fill buffers with data
+		 *
+		 * Add memory barrier to make sure the desc were written before
+		 * issue a doorbell
+		 */
 		rte_wmb();
 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
 
@@ -1226,8 +1406,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 }
 
 static int ena_device_init(struct ena_com_dev *ena_dev,
-			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
+			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
+			   bool *wd_state)
 {
+	uint32_t aenq_groups;
 	int rc;
 	bool readless_supported;
 
@@ -1247,7 +1429,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
 
 	/* reset device */
-	rc = ena_com_dev_reset(ena_dev);
+	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 	if (rc) {
 		RTE_LOG(ERR, PMD, "cannot reset device\n");
 		goto err_mmio_read_less;
@@ -1263,7 +1445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
 
 	/* ENA device administration layer init */
-	rc = ena_com_admin_init(ena_dev, NULL, true);
+	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 	if (rc) {
 		RTE_LOG(ERR, PMD,
 			"cannot initialize ena admin queue with device\n");
@@ -1286,6 +1468,21 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 		goto err_admin_init;
 	}
 
+	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
+		      BIT(ENA_ADMIN_NOTIFICATION) |
+		      BIT(ENA_ADMIN_KEEP_ALIVE) |
+		      BIT(ENA_ADMIN_FATAL_ERROR) |
+		      BIT(ENA_ADMIN_WARNING);
+
+	aenq_groups &= get_feat_ctx->aenq.supported_groups;
+	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc);
+		goto err_admin_init;
+	}
+
+	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
 	return 0;
 
 err_admin_init:
@@ -1297,16 +1494,89 @@ err_mmio_read_less:
 	return rc;
 }
 
+static void ena_interrupt_handler_rte(void *cb_arg)
+{
+	struct ena_adapter *adapter = (struct ena_adapter *)cb_arg;
+	struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+	ena_com_admin_q_comp_intr_handler(ena_dev);
+	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
+		ena_com_aenq_intr_handler(ena_dev, adapter);
+}
+
+static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+{
+	if (!adapter->wd_state)
+		return;
+
+	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+		return;
+
+	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
+	    adapter->keep_alive_timeout)) {
+		RTE_LOG(ERR, PMD, "Keep alive timeout\n");
+		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
+		adapter->trigger_reset = true;
+	}
+}
+
+/* Check if admin queue is enabled */
+static void check_for_admin_com_state(struct ena_adapter *adapter)
+{
+	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
+		RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n");
+		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
+		adapter->trigger_reset = true;
+	}
+}
+
+static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
+				  void *arg)
+{
+	struct ena_adapter *adapter = (struct ena_adapter *)arg;
+	struct rte_eth_dev *dev = adapter->rte_dev;
+
+	check_for_missing_keep_alive(adapter);
+	check_for_admin_com_state(adapter);
+
+	if (unlikely(adapter->trigger_reset)) {
+		RTE_LOG(ERR, PMD, "Trigger reset is on\n");
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+			NULL);
+	}
+}
+
+static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
+				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+	int io_sq_num, io_cq_num, io_queue_num;
+
+	io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+	io_cq_num = get_feat_ctx->max_queues.max_cq_num;
+
+	io_queue_num = RTE_MIN(io_sq_num, io_cq_num);
+
+	if (unlikely(io_queue_num == 0)) {
+		RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
+		return -EFAULT;
+	}
+
+	return io_queue_num;
+}
+
 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev;
+	struct rte_intr_handle *intr_handle;
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(eth_dev->data->dev_private);
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
 	int queue_size, rc;
+	u16 tx_sgl_size = 0;
 
 	static int adapters_found;
+	bool wd_state;
 
 	memset(adapter, 0, sizeof(struct ena_adapter));
 	ena_dev = &adapter->ena_dev;
@@ -1330,19 +1600,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 		     pci_dev->addr.devid,
 		     pci_dev->addr.function);
 
+	intr_handle = &pci_dev->intr_handle;
+
 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
 
-	/* Present ENA_MEM_BAR indicates available LLQ mode.
-	 * Use corresponding policy
-	 */
-	if (adapter->dev_mem_base)
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
-	else if (adapter->regs)
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-	else
+	if (!adapter->regs) {
 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
 			     ENA_REGS_BAR);
+		return -ENXIO;
+	}
 
 	ena_dev->reg_bar = adapter->regs;
 	ena_dev->dmadev = adapter->pdev;
@@ -1353,36 +1620,28 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 		 adapter->id_number);
 
 	/* device specific initialization routine */
-	rc = ena_device_init(ena_dev, &get_feat_ctx);
+	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
 	if (rc) {
 		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
-		return -1;
-	}
-
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		if (get_feat_ctx.max_queues.max_llq_num == 0) {
-			PMD_INIT_LOG(ERR,
-				     "Trying to use LLQ but llq_num is 0.\n"
-				     "Fall back into regular queues.");
-			ena_dev->tx_mem_queue_type =
-				ENA_ADMIN_PLACEMENT_POLICY_HOST;
-			adapter->num_queues =
-				get_feat_ctx.max_queues.max_sq_num;
-		} else {
-			adapter->num_queues =
-				get_feat_ctx.max_queues.max_llq_num;
-		}
-	} else {
-		adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
+		goto err;
 	}
+	adapter->wd_state = wd_state;
 
-	queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
-	if ((queue_size <= 0) || (adapter->num_queues <= 0))
-		return -EFAULT;
+	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+	adapter->num_queues = ena_calc_io_queue_num(ena_dev,
+						    &get_feat_ctx);
+
+	queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
+	if (queue_size <= 0 || adapter->num_queues <= 0) {
+		rc = -EFAULT;
+		goto err_device_destroy;
+	}
 
 	adapter->tx_ring_size = queue_size;
 	adapter->rx_ring_size = queue_size;
 
+	adapter->max_tx_sgl_size = tx_sgl_size;
+
 	/* prepare ring structures */
 	ena_init_rings(adapter);
 
@@ -1405,58 +1664,77 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 					 RTE_CACHE_LINE_SIZE);
 	if (!adapter->drv_stats) {
 		RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_delete_debug_area;
 	}
 
+	rte_intr_callback_register(intr_handle,
+				   ena_interrupt_handler_rte,
+				   adapter);
+	rte_intr_enable(intr_handle);
+	ena_com_set_admin_polling_mode(ena_dev, false);
+	ena_com_admin_aenq_enable(ena_dev);
+
+	if (adapters_found == 0)
+		rte_timer_subsystem_init();
+	rte_timer_init(&adapter->timer_wd);
+
 	adapters_found++;
 	adapter->state = ENA_ADAPTER_STATE_INIT;
 
 	return 0;
+
+err_delete_debug_area:
+	ena_com_delete_debug_area(ena_dev);
+
+err_device_destroy:
+	ena_com_delete_host_info(ena_dev);
+	ena_com_admin_destroy(ena_dev);
+
+err:
+	return rc;
 }
 
-static int ena_dev_configure(struct rte_eth_dev *dev)
+static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
 {
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ena_adapter *adapter =
-		(struct ena_adapter *)(dev->data->dev_private);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+		(struct ena_adapter *)(eth_dev->data->dev_private);
 
-	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    tx_offloads, adapter->tx_supported_offloads);
-		return -ENOTSUP;
-	}
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
 
-	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    rx_offloads, adapter->rx_supported_offloads);
-		return -ENOTSUP;
-	}
+	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
		ena_close(eth_dev);
 
-	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
-	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
-		PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
-			     adapter->state);
-		return -1;
-	}
+	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+	eth_dev->tx_pkt_prepare = NULL;
 
-	switch (adapter->state) {
-	case ENA_ADAPTER_STATE_INIT:
-	case ENA_ADAPTER_STATE_STOPPED:
-		adapter->state = ENA_ADAPTER_STATE_CONFIG;
-		break;
-	case ENA_ADAPTER_STATE_CONFIG:
-		RTE_LOG(WARNING, PMD,
-			"Ivalid driver state while trying to configure device\n");
-		break;
-	default:
-		break;
-	}
+	rte_free(adapter->drv_stats);
+	adapter->drv_stats = NULL;
+
+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+				     ena_interrupt_handler_rte,
+				     adapter);
+
+	adapter->state = ENA_ADAPTER_STATE_FREE;
+
+	return 0;
+}
 
-	adapter->tx_selected_offloads = tx_offloads;
-	adapter->rx_selected_offloads = rx_offloads;
+static int ena_dev_configure(struct rte_eth_dev *dev)
+{
+	struct ena_adapter *adapter =
+		(struct ena_adapter *)(dev->data->dev_private);
+
+	adapter->state = ENA_ADAPTER_STATE_CONFIG;
+
+	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
 	return 0;
 }
 
@@ -1473,6 +1751,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
 		ring->id = i;
 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
+		ring->sgl_size = adapter->max_tx_sgl_size;
 	}
 
 	for (i = 0; i < adapter->num_queues; i++) {
@@ -1485,32 +1764,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->tx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->rx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
@@ -1527,8 +1780,6 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	ena_dev = &adapter->ena_dev;
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device");
 
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
 	dev_info->speed_capa =
 			ETH_LINK_SPEED_1G   |
 			ETH_LINK_SPEED_2_5G |
@@ -1581,6 +1832,16 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 
 	adapter->tx_supported_offloads = tx_feat;
 	adapter->rx_supported_offloads = rx_feat;
+
+	dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+
+	dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+					feat.max_queues.max_packet_tx_descs);
+	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+					feat.max_queues.max_packet_tx_descs);
 }
 
 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1591,6 +1852,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	unsigned int ring_mask = ring_size - 1;
 	uint16_t next_to_clean = rx_ring->next_to_clean;
 	uint16_t desc_in_use = 0;
+	uint16_t req_id;
 	unsigned int recv_idx = 0;
 	struct rte_mbuf *mbuf = NULL;
 	struct rte_mbuf *mbuf_head = NULL;
@@ -1624,6 +1886,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				    &ena_rx_ctx);
 		if (unlikely(rc)) {
 			RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+			rx_ring->adapter->trigger_reset = true;
 			return 0;
 		}
 
@@ -1631,12 +1894,17 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			break;
 
 		while (segments < ena_rx_ctx.descs) {
-			mbuf = rx_buff_info[next_to_clean & ring_mask];
+			req_id = ena_rx_ctx.ena_bufs[segments].req_id;
+			rc = validate_rx_req_id(rx_ring, req_id);
+			if (unlikely(rc))
+				break;
+
+			mbuf = rx_buff_info[req_id];
 			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
 			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 			mbuf->refcnt = 1;
 			mbuf->next = NULL;
-			if (segments == 0) {
+			if (unlikely(segments == 0)) {
 				mbuf->nb_segs = ena_rx_ctx.descs;
 				mbuf->port = rx_ring->port_id;
 				mbuf->pkt_len = 0;
@@ -1648,6 +1916,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			mbuf_head->pkt_len += mbuf->data_len;
 
 			mbuf_prev = mbuf;
+			rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
+				req_id;
 			segments++;
 			next_to_clean++;
 		}
@@ -1741,6 +2011,46 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return i;
 }
 
+static void ena_update_hints(struct ena_adapter *adapter,
+			     struct ena_admin_ena_hw_hints *hints)
+{
+	if (hints->admin_completion_tx_timeout)
+		adapter->ena_dev.admin_queue.completion_timeout =
+			hints->admin_completion_tx_timeout * 1000;
+
+	if (hints->mmio_read_timeout)
+		/* convert to usec */
+		adapter->ena_dev.mmio_read.reg_read_to =
+			hints->mmio_read_timeout * 1000;
+
+	if (hints->driver_watchdog_timeout) {
+		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
+		else
+			// Convert msecs to ticks
+			adapter->keep_alive_timeout =
+				(hints->driver_watchdog_timeout *
+					rte_get_timer_hz()) / 1000;
+	}
+}
+
+static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
+					struct rte_mbuf *mbuf)
+{
+	int num_segments, rc;
+
+	num_segments = mbuf->nb_segs;
+
+	if (likely(num_segments < tx_ring->sgl_size))
+		return 0;
+
+	rc = rte_pktmbuf_linearize(mbuf);
+	if (unlikely(rc))
+		RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+
+	return rc;
+}
+
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts)
 {
@@ -1771,6 +2081,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
 		mbuf = tx_pkts[sent_idx];
 
+		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+		if (unlikely(rc))
+			break;
+
 		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
 		tx_info = &tx_ring->tx_buffer_info[req_id];
 		tx_info->mbuf = mbuf;
@@ -1848,6 +2162,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	/* Clear complete packets */
 	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
+		rc = validate_tx_req_id(tx_ring, req_id);
+		if (rc)
+			break;
+
 		/* Get Tx info & store how many descs were processed */
 		tx_info = &tx_ring->tx_buffer_info[req_id];
 		total_tx_descs += tx_info->tx_descs;
@@ -1875,6 +2193,9 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return sent_idx;
 }
 
+/*********************************************************************
+ *  PMD configuration
+ *********************************************************************/
 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct rte_pci_device *pci_dev)
 {
@@ -1884,12 +2205,13 @@ static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
 {
-	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
 }
 
 static struct rte_pci_driver rte_ena_pmd = {
 	.id_table = pci_id_ena_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_WC_ACTIVATE,
 	.probe = eth_ena_pci_probe,
 	.remove = eth_ena_pci_remove,
 };
@@ -1898,9 +2220,7 @@ RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
 
-RTE_INIT(ena_init_log);
-static void
-ena_init_log(void)
+RTE_INIT(ena_init_log)
 {
 	ena_logtype_init = rte_log_register("pmd.net.ena.init");
 	if (ena_logtype_init >= 0)
@@ -1909,3 +2229,75 @@ ena_init_log(void)
 	if (ena_logtype_driver >= 0)
 		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
 }
+
+/******************************************************************************
+ ******************************** AENQ Handlers *******************************
+ *****************************************************************************/
+static void ena_update_on_link_change(void *adapter_data,
+				      struct ena_admin_aenq_entry *aenq_e)
+{
+	struct rte_eth_dev *eth_dev;
+	struct ena_adapter *adapter;
+	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
+	uint32_t status;
+
+	adapter = (struct ena_adapter *)adapter_data;
+	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
+	eth_dev = adapter->rte_dev;
+
+	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
+	adapter->link_status = status;
+
+	ena_link_update(eth_dev, 0);
+	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+static void ena_notification(void *data,
+			     struct ena_admin_aenq_entry *aenq_e)
+{
+	struct ena_adapter *adapter = (struct ena_adapter *)data;
+	struct ena_admin_ena_hw_hints *hints;
+
+	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
+		RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n",
+			aenq_e->aenq_common_desc.group,
+			ENA_ADMIN_NOTIFICATION);
+
+	switch (aenq_e->aenq_common_desc.syndrom) {
+	case ENA_ADMIN_UPDATE_HINTS:
+		hints = (struct ena_admin_ena_hw_hints *)
+			(&aenq_e->inline_data_w4);
+		ena_update_hints(adapter, hints);
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n",
+			aenq_e->aenq_common_desc.syndrom);
+	}
+}
+
+static void ena_keep_alive(void *adapter_data,
+			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+	adapter->timestamp_wd = rte_get_timer_cycles();
+}
+
+/**
+ * This handler will called for unknown event group or unimplemented handlers
+ **/
+static void unimplemented_aenq_handler(__rte_unused void *data,
+				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+	RTE_LOG(ERR, PMD, "Unknown event was received or event with "
+		"unimplemented handler\n");
+}
+
+static struct ena_aenq_handlers aenq_handlers = {
+	.handlers = {
+		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
+		[ENA_ADMIN_NOTIFICATION] = ena_notification,
+		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
+	},
+	.unimplemented_handler = unimplemented_aenq_handler
+};
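
The behavioural change worth calling out in this patch is that the PMD now reports faults (keep-alive timeout, admin queue failure, invalid Tx/Rx req_id) by raising RTE_ETH_EVENT_INTR_RESET and leaves recovery to the application through the new .dev_reset op, while link changes arrive as RTE_ETH_EVENT_INTR_LSC. Below is a minimal application-side sketch of how these pieces fit together; it is not part of the patch, and the deferred-reset flag plus the main_loop() wrapper are illustrative assumptions. Note also that the watchdog armed in ena_start() is a plain rte_timer, so it only fires if some lcore services rte_timer_manage() regularly.

/*
 * Application-side sketch (assumed names: reset_pending, port_event_cb,
 * main_loop). Consumes the LSC and reset events the driver now emits and
 * drives the rte_timer subsystem so ena_timer_wd_callback() can run.
 */
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_timer.h>

static volatile int reset_pending;

/* Runs in the interrupt/host thread context; do not reset the port here,
 * just record that a reset was requested and handle it from the main loop.
 */
static int
port_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	      void *cb_arg __rte_unused, void *ret_param __rte_unused)
{
	if (event == RTE_ETH_EVENT_INTR_RESET)
		reset_pending = 1;
	else if (event == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link status changed\n", port_id);
	return 0;
}

static void
main_loop(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      port_event_cb, NULL);
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      port_event_cb, NULL);

	for (;;) {
		/* Drives the PERIODICAL watchdog armed in ena_start(). */
		rte_timer_manage();

		if (reset_pending) {
			reset_pending = 0;
			/* Ends up in the new ena_dev_reset(), which
			 * re-initializes the device and re-creates the
			 * Rx/Tx queues before the port is restarted.
			 */
			if (rte_eth_dev_reset(port_id) == 0)
				rte_eth_dev_start(port_id);
		}

		/* ... regular Rx/Tx burst processing ... */
	}
}

The split between callback and main loop matters because the event callback is invoked from the host interrupt thread, while ena_dev_reset() destroys and re-creates queues and must not race with datapath threads; deferring the actual rte_eth_dev_reset() call to a controlled point in the application is the pattern testpmd uses for the same event.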