author    Luca Boccassi <luca.boccassi@gmail.com>    2018-02-19 11:16:57 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>    2018-02-19 11:17:28 +0000
commit    ca33590b6af032bff57d9cc70455660466a654b2
tree      0b68b090bd9b4a78a3614b62400b29279d76d553
parent    169a9de21e263aa6599cdc2d87a45ae158d9f509

New upstream version 18.02 (upstream/18.02)

Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/ena/ena_ethdev.c')
-rw-r--r--  drivers/net/ena/ena_ethdev.c | 139
1 file changed, 117 insertions(+), 22 deletions(-)
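In short, this import tracks the 18.02 ethdev API changes in the ENA PMD: the driver now includes the driver-only header rte_ethdev_driver.h instead of rte_ethdev.h, received mbufs carry RTE_PTYPE_* packet types instead of (incorrectly) Tx checksum flags, port- and queue-level Rx/Tx offload requests are validated against device capabilities, and two dynamic log types (pmd.net.ena.init and pmd.net.ena.driver) are registered. A hedged app-side usage sketch follows the diff.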
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 22db8951..34b2a8d7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -32,7 +32,7 @@
*/
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
@@ -164,6 +164,14 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
+ DEV_TX_OFFLOAD_UDP_CKSUM |\
+ DEV_TX_OFFLOAD_IPV4_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
+ PKT_TX_IP_CKSUM |\
+ PKT_TX_TCP_SEG)
+
/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
@@ -178,6 +186,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+int ena_logtype_init;
+int ena_logtype_driver;
+
static const struct rte_pci_id pci_id_ena_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
@@ -227,6 +238,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+ uint64_t offloads);
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+ uint64_t offloads);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
@@ -260,16 +275,17 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
struct ena_com_rx_ctx *ena_rx_ctx)
{
uint64_t ol_flags = 0;
+ uint32_t packet_type = 0;
if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
- ol_flags |= PKT_TX_TCP_CKSUM;
+ packet_type |= RTE_PTYPE_L4_TCP;
else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
- ol_flags |= PKT_TX_UDP_CKSUM;
+ packet_type |= RTE_PTYPE_L4_UDP;
if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
- ol_flags |= PKT_TX_IPV4;
+ packet_type |= RTE_PTYPE_L3_IPV4;
else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
- ol_flags |= PKT_TX_IPV6;
+ packet_type |= RTE_PTYPE_L3_IPV6;
if (unlikely(ena_rx_ctx->l4_csum_err))
ol_flags |= PKT_RX_L4_CKSUM_BAD;
@@ -277,24 +293,28 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags = ol_flags;
+ mbuf->packet_type = packet_type;
}
static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
- struct ena_com_tx_ctx *ena_tx_ctx)
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ uint64_t queue_offloads)
{
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
- if (mbuf->ol_flags &
- (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
+ if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
+ (queue_offloads & QUEUE_OFFLOADS)) {
/* check if TSO is required */
- if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
+ if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
+ (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
ena_tx_ctx->tso_enable = true;
ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
}
/* check if L3 checksum is needed */
- if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+ if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
ena_tx_ctx->l3_csum_enable = true;
if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -310,19 +330,17 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
}
/* check if L4 checksum is needed */
- switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
ena_tx_ctx->l4_csum_enable = true;
- break;
- case PKT_TX_UDP_CKSUM:
+ } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
ena_tx_ctx->l4_csum_enable = true;
- break;
- default:
+ } else {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
ena_tx_ctx->l4_csum_enable = false;
- break;
}
ena_meta->mss = mbuf->tso_segsz;
@@ -755,7 +773,8 @@ static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
uint32_t max_frame_len = adapter->max_mtu;
- if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)
+ if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME)
max_frame_len =
adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;
@@ -945,7 +964,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
__rte_unused unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
struct ena_com_create_io_ctx ctx =
/* policy set to _HOST just to satisfy icc compiler */
@@ -982,6 +1001,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
+ if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
+ !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
+ RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+ return -EINVAL;
+ }
+
ena_qid = ENA_IO_TXQ_IDX(queue_idx);
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1036,6 +1061,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
for (i = 0; i < txq->ring_size; i++)
txq->empty_tx_reqs[i] = i;
+ txq->offloads = tx_conf->offloads;
+
/* Store pointer to this queue in upper layer */
txq->configured = 1;
dev->data->tx_queues[queue_idx] = txq;
@@ -1047,7 +1074,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
__rte_unused unsigned int socket_id,
- __rte_unused const struct rte_eth_rxconf *rx_conf,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct ena_com_create_io_ctx ctx =
@@ -1083,6 +1110,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
+ if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
+ RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+ return -EINVAL;
+ }
+
ena_qid = ENA_IO_RXQ_IDX(queue_idx);
ctx.qid = ena_qid;
@@ -1386,6 +1418,22 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
{
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+
+ if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
+ RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+ tx_offloads, adapter->tx_supported_offloads);
+ return -ENOTSUP;
+ }
+
+ if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+ rx_offloads, adapter->rx_supported_offloads);
+ return -ENOTSUP;
+ }
if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1407,6 +1455,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
break;
}
+ adapter->tx_selected_offloads = tx_offloads;
+ adapter->rx_selected_offloads = rx_offloads;
return 0;
}
@@ -1435,13 +1485,39 @@ static void ena_init_rings(struct ena_adapter *adapter)
}
}
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+ uint64_t offloads)
+{
+ uint64_t port_offloads = adapter->tx_selected_offloads;
+
+ /* Check if port supports all requested offloads.
+ * True if all offloads selected for queue are set for port.
+ */
+ if ((offloads & port_offloads) != offloads)
+ return false;
+ return true;
+}
+
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+ uint64_t offloads)
+{
+ uint64_t port_offloads = adapter->rx_selected_offloads;
+
+ /* Check if port supports all requested offloads.
+ * True if all offloads selected for queue are set for port.
+ */
+ if ((offloads & port_offloads) != offloads)
+ return false;
+ return true;
+}
+
static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct ena_adapter *adapter;
struct ena_com_dev *ena_dev;
struct ena_com_dev_get_features_ctx feat;
- uint32_t rx_feat = 0, tx_feat = 0;
+ uint64_t rx_feat = 0, tx_feat = 0;
int rc = 0;
ena_assert_msg(dev->data != NULL, "Uninitialized device");
@@ -1487,9 +1563,13 @@ static void ena_infos_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
+ rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
/* Inform framework about available features */
dev_info->rx_offload_capa = rx_feat;
+ dev_info->rx_queue_offload_capa = rx_feat;
dev_info->tx_offload_capa = tx_feat;
+ dev_info->tx_queue_offload_capa = tx_feat;
dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
dev_info->max_rx_pktlen = adapter->max_mtu;
@@ -1498,6 +1578,9 @@ static void ena_infos_get(struct rte_eth_dev *dev,
dev_info->max_rx_queues = adapter->num_queues;
dev_info->max_tx_queues = adapter->num_queues;
dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
+
+ adapter->tx_supported_offloads = tx_feat;
+ adapter->rx_supported_offloads = rx_feat;
}
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1714,7 +1797,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
} /* there's no else as we take advantage of memset zeroing */
/* Set TX offloads flags, if applicable */
- ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);
+ ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
if (unlikely(mbuf->ol_flags &
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
@@ -1814,3 +1897,15 @@ static struct rte_pci_driver rte_ena_pmd = {
RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ena_init_log);
+static void
+ena_init_log(void)
+{
+ ena_logtype_init = rte_log_register("pmd.net.ena.init");
+ if (ena_logtype_init >= 0)
+ rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
+ ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
+ if (ena_logtype_driver >= 0)
+ rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
+}
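
As context for the offload hunks above, below is a minimal app-side sketch of how a DPDK 18.02 application would negotiate these offloads with the ENA PMD. It is illustrative only: the port id, descriptor counts, and the particular offload set are assumptions, not part of this patch.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical parameters, for illustration only. */
#define PORT_ID 0
#define NB_RXD  512
#define NB_TXD  512

static int
setup_ena_port(struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	struct rte_eth_rxconf rxconf;
	struct rte_eth_txconf txconf;
	int ret;

	rte_eth_dev_info_get(PORT_ID, &dev_info);

	/* Use the 17.11+ offload API: skip the legacy rxmode bitfield
	 * and request only what the port advertises; ena_dev_configure()
	 * rejects anything else with -ENOTSUP. */
	port_conf.rxmode.ignore_offload_bitfield = 1;
	port_conf.rxmode.offloads = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM) &
				    dev_info.rx_offload_capa;
	port_conf.txmode.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_TSO) &
				    dev_info.tx_offload_capa;

	ret = rte_eth_dev_configure(PORT_ID, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* Per-queue Tx offloads are only honoured when txq_flags is
	 * ETH_TXQ_FLAGS_IGNORE, and must be a subset of the port set
	 * (see ena_are_tx_queue_offloads_allowed() above). */
	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(PORT_ID, 0, NB_TXD,
				     rte_eth_dev_socket_id(PORT_ID), &txconf);
	if (ret != 0)
		return ret;

	rxconf = dev_info.default_rxconf;
	rxconf.offloads = port_conf.rxmode.offloads;
	return rte_eth_rx_queue_setup(PORT_ID, 0, NB_RXD,
				      rte_eth_dev_socket_id(PORT_ID),
				      &rxconf, mb_pool);
}

/* On the Rx side the driver now reports parsed headers through
 * mbuf->packet_type rather than overloading Tx ol_flags, so receivers
 * classify with the RTE_PTYPE_* masks: */
static inline int
is_ipv4_tcp(const struct rte_mbuf *m)
{
	return (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4 &&
	       (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP;
}

With these checks in place, an unsupported offload request now fails early, at rte_eth_dev_configure() or queue setup time with -ENOTSUP/-EINVAL, instead of being silently ignored on the data path.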