author    Matthew Smith <mgsmith@netgate.com>  2018-10-25 08:46:53 -0500
committer Damjan Marion <dmarion@me.com>       2018-10-26 19:48:54 +0000
commit    2a1da5218a7bf4ccd970d74a6e22578513affc32 (patch)
tree      1945cd3bba69ec65e3a68b83c1700a39d779931d /build/external/patches/dpdk_18.08/0005-net-ena-recreate-HW-IO-rings-on-start-and-stop.patch
parent    8e7ba4ad12bf5d0ebfbef31129770eee79b54ea8 (diff)
dpdk: ENA PMD patch for failure on port restart
Patch to fix a bug in the ENA PMD when starting a port that had previously been started and stopped. The start failed to allocate descriptors because they had already been allocated during the initial start and were not released when the port was stopped.

Issue reported/tracked at https://github.com/amzn/amzn-drivers/issues/86

Change-Id: Id9c8d598929f0561788b985011fe5dea8f4a21a5
Signed-off-by: Matthew Smith <mgsmith@netgate.com>
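For context, a minimal reproduction sketch (not part of this commit; the function name and setup are illustrative, assuming "port" is an ENA device bound to DPDK 18.08 and "mp" is a valid mbuf mempool, with error handling trimmed). The second rte_eth_dev_start() below is the call that failed before the patch, because the descriptors refilled on the first start were never released by rte_eth_dev_stop():

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int ena_restart_repro(uint16_t port, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = { 0 };
	int socket = rte_eth_dev_socket_id(port);

	rte_eth_dev_configure(port, 1, 1, &conf);
	rte_eth_rx_queue_setup(port, 0, 1024, socket, NULL, mp);
	rte_eth_tx_queue_setup(port, 0, 1024, socket, NULL);

	rte_eth_dev_start(port);        /* first start: Rx ring refilled          */
	rte_eth_dev_stop(port);         /* 18.08 driver kept HW rings and mbufs   */
	return rte_eth_dev_start(port); /* failed here before this patch          */
}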
Diffstat (limited to 'build/external/patches/dpdk_18.08/0005-net-ena-recreate-HW-IO-rings-on-start-and-stop.patch')
-rw-r--r-- build/external/patches/dpdk_18.08/0005-net-ena-recreate-HW-IO-rings-on-start-and-stop.patch | 359
1 file changed, 359 insertions, 0 deletions
diff --git a/build/external/patches/dpdk_18.08/0005-net-ena-recreate-HW-IO-rings-on-start-and-stop.patch b/build/external/patches/dpdk_18.08/0005-net-ena-recreate-HW-IO-rings-on-start-and-stop.patch
new file mode 100644
index 00000000000..4e45b2139d5
--- /dev/null
+++ b/build/external/patches/dpdk_18.08/0005-net-ena-recreate-HW-IO-rings-on-start-and-stop.patch
@@ -0,0 +1,359 @@
+From 792dd52ca1a513fc16ee56b789c7e3177cb782f7 Mon Sep 17 00:00:00 2001
+From: Michal Krawczyk <mk@semihalf.com>
+Date: Wed, 24 Oct 2018 11:37:17 +0200
+Subject: [PATCH] net/ena: recreate HW IO rings on start and stop
+
+On start the driver was refilling all Rx buffers, but the old ones
+were not released. That way, running start/stop a few times caused
+the device to run out of descriptors.
+
+To fix the issue, the IO rings are now destroyed on stop and
+recreated on start. This way the device does not lose any
+descriptors.
+
+Furthermore, there was also a memory leak of the Rx mbufs, which were
+allocated on start and not freed on stop.
+
+Change-Id: I01dfd036d0bff517e42e35257481de4983679763
+---
+ drivers/net/ena/ena_ethdev.c | 196 ++++++++++++++++++++-----------------------
+ 1 file changed, 91 insertions(+), 105 deletions(-)
+
+diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
+index c255dc6..de5d2ed 100644
+--- a/drivers/net/ena/ena_ethdev.c
++++ b/drivers/net/ena/ena_ethdev.c
+@@ -239,6 +239,8 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring);
+ static void ena_tx_queue_release_bufs(struct ena_ring *ring);
+ static int ena_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
++static int ena_create_io_queue(struct ena_ring *ring);
++static void ena_free_io_queues_all(struct ena_adapter *adapter);
+ static int ena_queue_restart(struct ena_ring *ring);
+ static int ena_queue_restart_all(struct rte_eth_dev *dev,
+ enum ena_ring_type ring_type);
+@@ -510,7 +512,8 @@ static void ena_close(struct rte_eth_dev *dev)
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+- ena_stop(dev);
++ if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
++ ena_stop(dev);
+ adapter->state = ENA_ADAPTER_STATE_CLOSED;
+
+ ena_rx_queue_release_all(dev);
+@@ -746,21 +749,12 @@ static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
+ static void ena_rx_queue_release(void *queue)
+ {
+ struct ena_ring *ring = (struct ena_ring *)queue;
+- struct ena_adapter *adapter = ring->adapter;
+- int ena_qid;
+
+ ena_assert_msg(ring->configured,
+ "API violation - releasing not configured queue");
+ ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
+ "API violation");
+
+- /* Destroy HW queue */
+- ena_qid = ENA_IO_RXQ_IDX(ring->id);
+- ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
+-
+- /* Free all bufs */
+- ena_rx_queue_release_bufs(ring);
+-
+ /* Free ring resources */
+ if (ring->rx_buffer_info)
+ rte_free(ring->rx_buffer_info);
+@@ -779,18 +773,12 @@ static void ena_rx_queue_release(void *queue)
+ static void ena_tx_queue_release(void *queue)
+ {
+ struct ena_ring *ring = (struct ena_ring *)queue;
+- struct ena_adapter *adapter = ring->adapter;
+- int ena_qid;
+
+ ena_assert_msg(ring->configured,
+ "API violation. Releasing not configured queue");
+ ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
+ "API violation");
+
+- /* Destroy HW queue */
+- ena_qid = ENA_IO_TXQ_IDX(ring->id);
+- ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
+-
+ /* Free all bufs */
+ ena_tx_queue_release_bufs(ring);
+
+@@ -1078,10 +1066,86 @@ static void ena_stop(struct rte_eth_dev *dev)
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ rte_timer_stop_sync(&adapter->timer_wd);
++ ena_free_io_queues_all(adapter);
+
+ adapter->state = ENA_ADAPTER_STATE_STOPPED;
+ }
+
++static int ena_create_io_queue(struct ena_ring *ring)
++{
++ struct ena_adapter *adapter;
++ struct ena_com_dev *ena_dev;
++ struct ena_com_create_io_ctx ctx =
++ /* policy set to _HOST just to satisfy icc compiler */
++ { ENA_ADMIN_PLACEMENT_POLICY_HOST,
++ 0, 0, 0, 0, 0 };
++ uint16_t ena_qid;
++ int rc;
++
++ adapter = ring->adapter;
++ ena_dev = &adapter->ena_dev;
++
++ if (ring->type == ENA_RING_TYPE_TX) {
++ ena_qid = ENA_IO_TXQ_IDX(ring->id);
++ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
++ ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
++ ctx.queue_size = adapter->tx_ring_size;
++ } else {
++ ena_qid = ENA_IO_RXQ_IDX(ring->id);
++ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
++ ctx.queue_size = adapter->rx_ring_size;
++ }
++ ctx.qid = ena_qid;
++ ctx.msix_vector = -1; /* interrupts not used */
++ ctx.numa_node = ena_cpu_to_node(ring->id);
++
++ rc = ena_com_create_io_queue(ena_dev, &ctx);
++ if (rc) {
++ RTE_LOG(ERR, PMD,
++ "failed to create io queue #%d (qid:%d) rc: %d\n",
++ ring->id, ena_qid, rc);
++ return rc;
++ }
++
++ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
++ &ring->ena_com_io_sq,
++ &ring->ena_com_io_cq);
++ if (rc) {
++ RTE_LOG(ERR, PMD,
++ "Failed to get io queue handlers. queue num %d rc: %d\n",
++ ring->id, rc);
++ ena_com_destroy_io_queue(ena_dev, ena_qid);
++ return rc;
++ }
++
++ if (ring->type == ENA_RING_TYPE_TX)
++ ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
++
++ return 0;
++}
++
++static void ena_free_io_queues_all(struct ena_adapter *adapter)
++{
++ struct rte_eth_dev *eth_dev = adapter->rte_dev;
++ struct ena_com_dev *ena_dev = &adapter->ena_dev;
++ int i;
++ uint16_t ena_qid;
++ uint16_t nb_rxq = eth_dev->data->nb_rx_queues;
++ uint16_t nb_txq = eth_dev->data->nb_tx_queues;
++
++ for (i = 0; i < nb_txq; ++i) {
++ ena_qid = ENA_IO_TXQ_IDX(i);
++ ena_com_destroy_io_queue(ena_dev, ena_qid);
++ }
++
++ for (i = 0; i < nb_rxq; ++i) {
++ ena_qid = ENA_IO_RXQ_IDX(i);
++ ena_com_destroy_io_queue(ena_dev, ena_qid);
++
++ ena_rx_queue_release_bufs(&adapter->rx_ring[i]);
++ }
++}
++
+ static int ena_queue_restart(struct ena_ring *ring)
+ {
+ int rc, bufs_num;
+@@ -1089,6 +1153,12 @@ static int ena_queue_restart(struct ena_ring *ring)
+ ena_assert_msg(ring->configured == 1,
+ "Trying to restart unconfigured queue\n");
+
++ rc = ena_create_io_queue(ring);
++ if (rc) {
++ PMD_INIT_LOG(ERR, "Failed to create IO queue!\n");
++ return rc;
++ }
++
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+@@ -1111,17 +1181,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
+ __rte_unused unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+ {
+- struct ena_com_create_io_ctx ctx =
+- /* policy set to _HOST just to satisfy icc compiler */
+- { ENA_ADMIN_PLACEMENT_POLICY_HOST,
+- ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 };
+ struct ena_ring *txq = NULL;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ unsigned int i;
+- int ena_qid;
+- int rc;
+- struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ txq = &adapter->tx_ring[queue_idx];
+
+@@ -1146,37 +1209,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
+ return -EINVAL;
+ }
+
+- ena_qid = ENA_IO_TXQ_IDX(queue_idx);
+-
+- ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+- ctx.qid = ena_qid;
+- ctx.msix_vector = -1; /* admin interrupts not used */
+- ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+- ctx.queue_size = adapter->tx_ring_size;
+- ctx.numa_node = ena_cpu_to_node(queue_idx);
+-
+- rc = ena_com_create_io_queue(ena_dev, &ctx);
+- if (rc) {
+- RTE_LOG(ERR, PMD,
+- "failed to create io TX queue #%d (qid:%d) rc: %d\n",
+- queue_idx, ena_qid, rc);
+- return rc;
+- }
+- txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
+- txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
+-
+- rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+- &txq->ena_com_io_sq,
+- &txq->ena_com_io_cq);
+- if (rc) {
+- RTE_LOG(ERR, PMD,
+- "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
+- queue_idx, rc);
+- goto err_destroy_io_queue;
+- }
+-
+- ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node);
+-
+ txq->port_id = dev->data->port_id;
+ txq->next_to_clean = 0;
+ txq->next_to_use = 0;
+@@ -1188,8 +1220,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->tx_buffer_info) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
+- rc = -ENOMEM;
+- goto err_destroy_io_queue;
++ return -ENOMEM;
+ }
+
+ txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
+@@ -1197,8 +1228,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->empty_tx_reqs) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
+- rc = -ENOMEM;
+- goto err_free;
++ rte_free(txq->tx_buffer_info);
++ return -ENOMEM;
+ }
+
+ for (i = 0; i < txq->ring_size; i++)
+@@ -1214,13 +1245,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+-
+-err_free:
+- rte_free(txq->tx_buffer_info);
+-
+-err_destroy_io_queue:
+- ena_com_destroy_io_queue(ena_dev, ena_qid);
+- return rc;
+ }
+
+ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+@@ -1230,16 +1254,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+ {
+- struct ena_com_create_io_ctx ctx =
+- /* policy set to _HOST just to satisfy icc compiler */
+- { ENA_ADMIN_PLACEMENT_POLICY_HOST,
+- ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 };
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_ring *rxq = NULL;
+- uint16_t ena_qid = 0;
+- int i, rc = 0;
+- struct ena_com_dev *ena_dev = &adapter->ena_dev;
++ int i;
+
+ rxq = &adapter->rx_ring[queue_idx];
+ if (rxq->configured) {
+@@ -1263,36 +1281,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+ return -EINVAL;
+ }
+
+- ena_qid = ENA_IO_RXQ_IDX(queue_idx);
+-
+- ctx.qid = ena_qid;
+- ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+- ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+- ctx.msix_vector = -1; /* admin interrupts not used */
+- ctx.queue_size = adapter->rx_ring_size;
+- ctx.numa_node = ena_cpu_to_node(queue_idx);
+-
+- rc = ena_com_create_io_queue(ena_dev, &ctx);
+- if (rc) {
+- RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
+- queue_idx, rc);
+- return rc;
+- }
+-
+- rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
+- rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
+-
+- rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+- &rxq->ena_com_io_sq,
+- &rxq->ena_com_io_cq);
+- if (rc) {
+- RTE_LOG(ERR, PMD,
+- "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
+- queue_idx, rc);
+- ena_com_destroy_io_queue(ena_dev, ena_qid);
+- return rc;
+- }
+-
+ rxq->port_id = dev->data->port_id;
+ rxq->next_to_clean = 0;
+ rxq->next_to_use = 0;
+@@ -1304,7 +1292,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->rx_buffer_info) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
+- ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return -ENOMEM;
+ }
+
+@@ -1315,7 +1302,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+ RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
+ rte_free(rxq->rx_buffer_info);
+ rxq->rx_buffer_info = NULL;
+- ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return -ENOMEM;
+ }
+
+@@ -1326,7 +1312,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+ rxq->configured = 1;
+ dev->data->rx_queues[queue_idx] = rxq;
+
+- return rc;
++ return 0;
+ }
+
+ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+--
+2.7.4
+
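With the patch applied, the HW IO rings and the Rx mbufs are released in ena_stop() and recreated in ena_queue_restart(), so repeated start/stop cycles no longer exhaust descriptors. A rough smoke-test sketch (hypothetical helper, assuming the port was configured and its queues set up as in the earlier snippet):

#include <rte_ethdev.h>

/* Hypothetical check: cycle the port a few times. On unpatched DPDK 18.08
 * the second rte_eth_dev_start() already fails; with this patch every
 * iteration should succeed. */
static int ena_cycle_port(uint16_t port, int iterations)
{
	int i, rc;

	for (i = 0; i < iterations; i++) {
		rc = rte_eth_dev_start(port);
		if (rc != 0)
			return rc;
		rte_eth_dev_stop(port); /* returns void in DPDK 18.08 */
	}
	return 0;
}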