path: root/drivers/net/ena
author    C.J. Collier <cjcollier@linuxfoundation.org>  2016-06-14 07:50:17 -0700
committer C.J. Collier <cjcollier@linuxfoundation.org>  2016-06-14 12:17:54 -0700
commit    97f17497d162afdb82c8704bf097f0fee3724b2e (patch)
tree      1c6269614c0c15ffef8451c58ae8f8b30a1bc804 /drivers/net/ena
parent    e04be89c2409570e0055b2cda60bd11395bb93b0 (diff)
Imported Upstream version 16.04
Change-Id: I77eadcd8538a9122e4773cbe55b24033dc451757
Signed-off-by: C.J. Collier <cjcollier@linuxfoundation.org>
Diffstat (limited to 'drivers/net/ena')
-rw-r--r--  drivers/net/ena/Makefile                              61
-rw-r--r--  drivers/net/ena/base/ena_com.c                      2809
-rw-r--r--  drivers/net/ena/base/ena_com.h                      1052
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_admin_defs.h      1979
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_common_defs.h       54
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_eth_io_defs.h     1488
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_gen_info.h          35
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_includes.h          39
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_regs_defs.h        135
-rw-r--r--  drivers/net/ena/base/ena_eth_com.c                   508
-rw-r--r--  drivers/net/ena/base/ena_eth_com.h                   153
-rw-r--r--  drivers/net/ena/base/ena_plat.h                       53
-rw-r--r--  drivers/net/ena/base/ena_plat_dpdk.h                 220
-rw-r--r--  drivers/net/ena/ena_ethdev.c                        1455
-rw-r--r--  drivers/net/ena/ena_ethdev.h                         160
-rw-r--r--  drivers/net/ena/ena_logs.h                            70
-rw-r--r--  drivers/net/ena/ena_platform.h                        59
-rw-r--r--  drivers/net/ena/rte_pmd_ena_version.map                4
18 files changed, 10334 insertions, 0 deletions
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
new file mode 100644
index 00000000..ac2b55dc
--- /dev/null
+++ b/drivers/net/ena/Makefile
@@ -0,0 +1,61 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ena.a
+CFLAGS += $(WERROR_FLAGS) -O2
+INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base
+
+EXPORT_MAP := rte_pmd_ena_version.map
+LIBABIVER := 1
+
+VPATH += $(SRCDIR)/base
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_com.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net lib/librte_malloc
+
+CFLAGS += $(INCLUDES)
+
+include $(RTE_SDK)/mk/rte.lib.mk
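The SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) lines above mean the driver sources are compiled only when the ENA PMD option is enabled in the DPDK build configuration. A minimal sketch of the assumed config entry (illustrative only, not part of this patch):

    # assumed DPDK build-config entry that turns the ENA PMD sources on
    CONFIG_RTE_LIBRTE_ENA_PMD=y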
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
new file mode 100644
index 00000000..a21a9513
--- /dev/null
+++ b/drivers/net/ena/base/ena_com.c
@@ -0,0 +1,2809 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "ena_com.h"
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* Timeout in micro-sec */
+#define ADMIN_CMD_TIMEOUT_US (1000000)
+
+#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ADMIN_QUEUE_DEPTH 32
+
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
+ | (ENA_COMMON_SPEC_VERSION_MINOR))
+
+#define ENA_CTRL_MAJOR 0
+#define ENA_CTRL_MINOR 0
+#define ENA_CTRL_SUB_MINOR 1
+
+#define MIN_ENA_CTRL_VER \
+ (((ENA_CTRL_MAJOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
+ ((ENA_CTRL_MINOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
+ (ENA_CTRL_SUB_MINOR))
+
+#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
+#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
+
+#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+
+static int ena_alloc_cnt;
+
+/*****************************************************************************/
+/*****************************************************************************/
+/*****************************************************************************/
+
+enum ena_cmd_status {
+ ENA_CMD_SUBMITTED,
+ ENA_CMD_COMPLETED,
+ /* Abort - canceled by the driver */
+ ENA_CMD_ABORTED,
+};
+
+struct ena_comp_ctx {
+ ena_wait_event_t wait_event;
+ struct ena_admin_acq_entry *user_cqe;
+ u32 comp_size;
+ enum ena_cmd_status status;
+ /* status from the device */
+ u8 comp_status;
+ u8 cmd_opcode;
+ bool occupied;
+};
+
+static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+ struct ena_common_mem_addr *ena_addr,
+ dma_addr_t addr)
+{
+ if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
+ ena_trc_err("dma address has more bits that the device supports\n");
+ return ENA_COM_INVAL;
+ }
+
+ ena_addr->mem_addr_low = (u32)addr;
+ ena_addr->mem_addr_high =
+ ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32);
+
+ return 0;
+}
+
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+{
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
+ ADMIN_SQ_SIZE(queue->q_depth),
+ queue->sq.entries,
+ queue->sq.dma_addr,
+ queue->sq.mem_handle);
+
+ if (!queue->sq.entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ queue->sq.head = 0;
+ queue->sq.tail = 0;
+ queue->sq.phase = 1;
+
+ queue->sq.db_addr = NULL;
+
+ return 0;
+}
+
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+{
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
+ ADMIN_CQ_SIZE(queue->q_depth),
+ queue->cq.entries,
+ queue->cq.dma_addr,
+ queue->cq.mem_handle);
+
+ if (!queue->cq.entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ queue->cq.head = 0;
+ queue->cq.phase = 1;
+
+ return 0;
+}
+
+static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+ struct ena_aenq_handlers *aenq_handlers)
+{
+ u32 addr_low, addr_high, aenq_caps;
+
+ dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ ENA_MEM_ALLOC_COHERENT(dev->dmadev,
+ ADMIN_AENQ_SIZE(dev->aenq.q_depth),
+ dev->aenq.entries,
+ dev->aenq.dma_addr,
+ dev->aenq.mem_handle);
+
+ if (!dev->aenq.entries) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ dev->aenq.head = dev->aenq.q_depth;
+ dev->aenq.phase = 1;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr);
+
+ ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar
+ + ENA_REGS_AENQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar
+ + ENA_REGS_AENQ_BASE_HI_OFF);
+
+ aenq_caps = 0;
+ aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+
+ ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar
+ + ENA_REGS_AENQ_CAPS_OFF);
+
+ if (unlikely(!aenq_handlers))
+ ena_trc_err("aenq handlers pointer is NULL\n");
+
+ dev->aenq.aenq_handlers = aenq_handlers;
+
+ return 0;
+}
+
+static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ struct ena_comp_ctx *comp_ctx)
+{
+ comp_ctx->occupied = false;
+ ATOMIC32_DEC(&queue->outstanding_cmds);
+}
+
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+ u16 command_id, bool capture)
+{
+ ENA_ASSERT(command_id < queue->q_depth,
+ "command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, queue->q_depth);
+
+ ENA_ASSERT(!(queue->comp_ctx[command_id].occupied && capture),
+ "Completion context is occupied");
+
+ if (capture) {
+ ATOMIC32_INC(&queue->outstanding_cmds);
+ queue->comp_ctx[command_id].occupied = true;
+ }
+
+ return &queue->comp_ctx[command_id];
+}
+
+static struct ena_comp_ctx *
+__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 tail_masked, cmd_id;
+ u16 queue_size_mask;
+ u16 cnt;
+
+ queue_size_mask = admin_queue->q_depth - 1;
+
+ tail_masked = admin_queue->sq.tail & queue_size_mask;
+
+ /* In case of queue FULL */
+ cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ if (cnt >= admin_queue->q_depth) {
+ ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
+ admin_queue->sq.tail,
+ admin_queue->sq.head,
+ admin_queue->q_depth);
+ admin_queue->stats.out_of_space++;
+ return ERR_PTR(ENA_COM_NO_SPACE);
+ }
+
+ cmd_id = admin_queue->curr_cmd_id;
+
+ cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
+ ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+
+ cmd->aq_common_descriptor.command_id |= cmd_id &
+ ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+
+ comp_ctx->status = ENA_CMD_SUBMITTED;
+ comp_ctx->comp_size = (u32)comp_size_in_bytes;
+ comp_ctx->user_cqe = comp;
+ comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+
+ ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
+
+ memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
+
+ admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
+ queue_size_mask;
+
+ admin_queue->sq.tail++;
+ admin_queue->stats.submitted_cmd++;
+
+ if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
+ admin_queue->sq.phase = !admin_queue->sq.phase;
+
+ ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr);
+
+ return comp_ctx;
+}
+
+static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+{
+ size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
+ if (unlikely(!queue->comp_ctx)) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ for (i = 0; i < queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(queue, i, false);
+ ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
+ }
+
+ return 0;
+}
+
+static struct ena_comp_ctx *
+ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ unsigned long flags = 0;
+ struct ena_comp_ctx *comp_ctx;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ if (unlikely(!admin_queue->running_state)) {
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ return ERR_PTR(ENA_COM_NO_DEVICE);
+ }
+ comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
+ cmd_size_in_bytes,
+ comp,
+ comp_size_in_bytes);
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ return comp_ctx;
+}
+
+static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+{
+ size_t size;
+
+ memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+
+ io_sq->desc_entry_size =
+ (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_desc) :
+ sizeof(struct ena_eth_io_rx_desc);
+
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
+ else
+ io_sq->desc_addr.virt_addr =
+ ENA_MEM_ALLOC(ena_dev->dmadev, size);
+
+ if (!io_sq->desc_addr.virt_addr) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ io_sq->tail = 0;
+ io_sq->next_to_comp = 0;
+ io_sq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+
+ /* Use the basic completion descriptor for Rx */
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc) :
+ sizeof(struct ena_eth_io_rx_cdesc_base);
+
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle);
+
+ if (!io_cq->cdesc_addr.virt_addr) {
+ ena_trc_err("memory allocation failed");
+ return ENA_COM_NO_MEM;
+ }
+
+ io_cq->phase = 1;
+ io_cq->head = 0;
+
+ return 0;
+}
+
+static void
+ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_acq_entry *cqe)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 cmd_id;
+
+ cmd_id = cqe->acq_common_descriptor.command &
+ ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
+
+ comp_ctx->status = ENA_CMD_COMPLETED;
+ comp_ctx->comp_status = cqe->acq_common_descriptor.status;
+
+ if (comp_ctx->user_cqe)
+ memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
+
+ if (!admin_queue->polling)
+ ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
+}
+
+static void
+ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+{
+ struct ena_admin_acq_entry *cqe = NULL;
+ u16 comp_num = 0;
+ u16 head_masked;
+ u8 phase;
+
+ head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
+ phase = admin_queue->cq.phase;
+
+ cqe = &admin_queue->cq.entries[head_masked];
+
+ /* Go over all the completions */
+ while ((cqe->acq_common_descriptor.flags &
+ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /* Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ rmb();
+ ena_com_handle_single_admin_completion(admin_queue, cqe);
+
+ head_masked++;
+ comp_num++;
+ if (unlikely(head_masked == admin_queue->q_depth)) {
+ head_masked = 0;
+ phase = !phase;
+ }
+
+ cqe = &admin_queue->cq.entries[head_masked];
+ }
+
+ admin_queue->cq.head += comp_num;
+ admin_queue->cq.phase = phase;
+ admin_queue->sq.head += comp_num;
+ admin_queue->stats.completed_cmd += comp_num;
+}
+
+static int ena_com_comp_status_to_errno(u8 comp_status)
+{
+ if (unlikely(comp_status != 0))
+ ena_trc_err("admin command failed[%u]\n", comp_status);
+
+ if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
+ return ENA_COM_INVAL;
+
+ switch (comp_status) {
+ case ENA_ADMIN_SUCCESS:
+ return 0;
+ case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
+ return ENA_COM_NO_MEM;
+ case ENA_ADMIN_UNSUPPORTED_OPCODE:
+ return ENA_COM_PERMISSION;
+ case ENA_ADMIN_BAD_OPCODE:
+ case ENA_ADMIN_MALFORMED_REQUEST:
+ case ENA_ADMIN_ILLEGAL_PARAMETER:
+ case ENA_ADMIN_UNKNOWN_ERROR:
+ return ENA_COM_INVAL;
+ }
+
+ return 0;
+}
+
+static int
+ena_com_wait_and_process_admin_cq_polling(
+ struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags = 0;
+ u64 start_time;
+ int ret;
+
+ start_time = ENA_GET_SYSTEM_USECS();
+
+ while (comp_ctx->status == ENA_CMD_SUBMITTED) {
+ if ((ENA_GET_SYSTEM_USECS() - start_time) >
+ ADMIN_CMD_TIMEOUT_US) {
+ ena_trc_err("Wait for completion (polling) timeout\n");
+ /* ENA didn't have any completion */
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ admin_queue->stats.no_completion++;
+ admin_queue->running_state = false;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ ret = ENA_COM_TIMER_EXPIRED;
+ goto err;
+ }
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ }
+
+ if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+ ena_trc_err("Command was aborted\n");
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ admin_queue->stats.aborted_cmd++;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ret = ENA_COM_NO_DEVICE;
+ goto err;
+ }
+
+ ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED,
+ "Invalid comp status %d\n", comp_ctx->status);
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+static int
+ena_com_wait_and_process_admin_cq_interrupts(
+ struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags = 0;
+ int ret = 0;
+
+ ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
+ ADMIN_CMD_TIMEOUT_US);
+
+ /* In case the command wasn't completed, find out the root cause.
+ * There might be 2 kinds of errors:
+ * 1) No completion (timeout reached)
+ * 2) There is a completion but the driver didn't receive an MSI-X interrupt.
+ */
+ if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ admin_queue->stats.no_completion++;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ if (comp_ctx->status == ENA_CMD_COMPLETED)
+ ena_trc_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
+ else
+ ena_trc_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+ comp_ctx->cmd_opcode, comp_ctx->status);
+
+ admin_queue->running_state = false;
+ ret = ENA_COM_TIMER_EXPIRED;
+ goto err;
+ }
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+/* This method reads a hardware device register by posting a write request
+ * and waiting for the device's response.
+ * On timeout the function will return ENA_MMIO_READ_TIMEOUT
+ */
+static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
+ mmio_read->read_resp;
+ u32 mmio_read_reg, ret;
+ unsigned long flags = 0;
+ int i;
+
+ ENA_MIGHT_SLEEP();
+
+ /* If readless is disabled, perform regular read */
+ if (!mmio_read->readless_supported)
+ return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar +
+ offset);
+
+ ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
+ mmio_read->seq_num++;
+
+ read_resp->req_id = mmio_read->seq_num + 0xDEAD;
+ mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
+ ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
+ mmio_read_reg |= mmio_read->seq_num &
+ ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+
+ /* make sure read_resp->req_id gets updated before the hw can write
+ * there
+ */
+ wmb();
+
+ ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_MMIO_REG_READ_OFF);
+
+ for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ if (read_resp->req_id == mmio_read->seq_num)
+ break;
+
+ ENA_UDELAY(1);
+ }
+
+ if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+ ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ mmio_read->seq_num,
+ offset,
+ read_resp->req_id,
+ read_resp->reg_off);
+ ret = ENA_MMIO_READ_TIMEOUT;
+ goto err;
+ }
+
+ ENA_ASSERT(read_resp->reg_off == offset,
+ "Invalid MMIO read return value");
+
+ ret = read_resp->reg_val;
+err:
+ ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
+
+ return ret;
+}
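A note on how the readless read above is consumed: callers must compare the returned value against ENA_MMIO_READ_TIMEOUT before trusting it, as wait_for_reset_state() does later in this file. Illustrative sketch, not part of the imported sources:

    u32 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
    if (unlikely(val == ENA_MMIO_READ_TIMEOUT))
            return ENA_COM_TIMER_EXPIRED; /* the device never answered the posted read */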
+
+/* There are two ways to wait for a completion.
+ * Polling mode - wait until the completion is available.
+ * Async mode - wait on a wait queue until the completion is ready
+ * (or the timeout expires).
+ * In async mode the IRQ handler is expected to call
+ * ena_com_handle_admin_completion to mark the completions.
+ */
+static int
+ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ if (admin_queue->polling)
+ return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
+ admin_queue);
+
+ return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
+ admin_queue);
+}
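The dispatch above is driven by the admin queue's polling flag. An illustrative sketch (not part of the imported sources) of how a driver would select each mode using the public helpers defined later in this file:

    /* Polling mode: completions are reaped by busy-waiting, no MSI-X required. */
    ena_com_set_admin_polling_mode(ena_dev, true);

    /* Interrupt mode: the admin MSI-X handler must invoke the completion
     * routine so that waiters on comp_ctx->wait_event are signalled.
     */
    ena_com_set_admin_polling_mode(ena_dev, false);
    /* ...and from the admin interrupt handler: */
    ena_com_admin_q_comp_intr_handler(ena_dev);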
+
+static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
+ u8 direction;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ destroy_cmd.sq.sq_identity |= (direction <<
+ ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+
+ destroy_cmd.sq.sq_idx = io_sq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
+
+ ret = ena_com_execute_admin_command(
+ admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+ ena_trc_err("failed to destroy io sq error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+
+ if (io_cq->cdesc_addr.virt_addr) {
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle);
+
+ io_cq->cdesc_addr.virt_addr = NULL;
+ }
+
+ if (io_sq->desc_addr.virt_addr) {
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle);
+ else
+ ENA_MEM_FREE(ena_dev->dmadev,
+ io_sq->desc_addr.virt_addr);
+
+ io_sq->desc_addr.virt_addr = NULL;
+ }
+}
+
+static int wait_for_reset_state(struct ena_com_dev *ena_dev,
+ u32 timeout, u16 exp_state)
+{
+ u32 val, i;
+
+ for (i = 0; i < timeout; i++) {
+ val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
+ exp_state)
+ return 0;
+
+ /* The resolution of the timeout is 100ms */
+ ENA_MSLEEP(100);
+ }
+
+ return ENA_COM_TIMER_EXPIRED;
+}
+
+static bool
+ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ u32 feature_mask = 1 << feature_id;
+
+ /* Device attributes are always supported */
+ if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
+ !(ena_dev->supported_features & feature_mask))
+ return false;
+
+ return true;
+}
+
+static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_get_feat_cmd get_cmd;
+ int ret;
+
+ if (!ena_dev) {
+ ena_trc_err("%s : ena_dev is NULL\n", __func__);
+ return ENA_COM_NO_DEVICE;
+ }
+
+ if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+ ena_trc_info("Feature %d isn't supported\n", feature_id);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&get_cmd, 0x0, sizeof(get_cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
+
+ if (control_buff_size)
+ get_cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ else
+ get_cmd.aq_common_descriptor.flags = 0;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd.control_buffer.address,
+ control_buf_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ get_cmd.control_buffer.length = control_buff_size;
+
+ get_cmd.feat_common.feature_id = feature_id;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)
+ &get_cmd,
+ sizeof(get_cmd),
+ (struct ena_admin_acq_entry *)
+ get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to submit get_feature command %d error: %d\n",
+ feature_id, ret);
+
+ return ret;
+}
+
+static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ return ena_com_get_feature_ex(ena_dev,
+ get_resp,
+ feature_id,
+ 0,
+ 0);
+}
+
+static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_key),
+ rss->hash_key,
+ rss->hash_key_dma_addr,
+ rss->hash_key_mem_handle);
+
+ if (unlikely(!rss->hash_key))
+ return ENA_COM_NO_MEM;
+
+ return 0;
+}
+
+static int ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_key)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_key),
+ rss->hash_key,
+ rss->hash_key_dma_addr,
+ rss->hash_key_mem_handle);
+ rss->hash_key = NULL;
+ return 0;
+}
+
+static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_ctrl),
+ rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr,
+ rss->hash_ctrl_mem_handle);
+
+ return 0;
+}
+
+static int ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_ctrl)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*rss->hash_ctrl),
+ rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr,
+ rss->hash_ctrl_mem_handle);
+ rss->hash_ctrl = NULL;
+
+ return 0;
+}
+
+static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ u16 log_size)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ size_t tbl_size;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ if (unlikely(ret))
+ return ret;
+
+ if ((get_resp.u.ind_table.min_size > log_size) ||
+ (get_resp.u.ind_table.max_size < log_size)) {
+ ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ 1 << log_size,
+ 1 << get_resp.u.ind_table.min_size,
+ 1 << get_resp.u.ind_table.max_size);
+ return ENA_COM_INVAL;
+ }
+
+ tbl_size = (1 << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ if (unlikely(!rss->rss_ind_tbl))
+ goto mem_err1;
+
+ tbl_size = (1 << log_size) * sizeof(u16);
+ rss->host_rss_ind_tbl =
+ ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
+ if (unlikely(!rss->host_rss_ind_tbl))
+ goto mem_err2;
+
+ rss->tbl_log_size = log_size;
+
+ return 0;
+
+mem_err2:
+ tbl_size = (1 << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ rss->rss_ind_tbl = NULL;
+mem_err1:
+ rss->tbl_log_size = 0;
+ return ENA_COM_NO_MEM;
+}
+
+static int ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ size_t tbl_size = (1 << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ if (rss->rss_ind_tbl)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
+ rss->rss_ind_tbl = NULL;
+
+ if (rss->host_rss_ind_tbl)
+ ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
+ rss->host_rss_ind_tbl = NULL;
+
+ return 0;
+}
+
+static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq, u16 cq_idx)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_sq_cmd create_cmd;
+ struct ena_admin_acq_create_sq_resp_desc cmd_completion;
+ u8 direction;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ create_cmd.sq_identity |= (direction <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+
+ create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+
+ create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+
+ create_cmd.sq_caps_3 |=
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+
+ create_cmd.cq_idx = cq_idx;
+ create_cmd.sq_depth = io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.sq_ba,
+ io_sq->desc_addr.phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+ }
+
+ ret = ena_com_execute_admin_command(
+ admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_sq->idx = cmd_completion.sq_idx;
+
+ io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ (uintptr_t)cmd_completion.sq_doorbell_offset);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
+ + cmd_completion.llq_headers_offset);
+
+ io_sq->desc_addr.pbuf_dev_addr =
+ (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
+ cmd_completion.llq_descriptors_offset);
+ }
+
+ ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+
+ return ret;
+}
+
+static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_com_io_sq *io_sq;
+ u16 qid;
+ int i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ qid = rss->host_rss_ind_tbl[i];
+ if (qid >= ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+
+ if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
+ return ENA_COM_INVAL;
+
+ rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
+ }
+
+ return 0;
+}
+
+static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
+{
+ u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { -1 };
+ struct ena_rss *rss = &ena_dev->rss;
+ u16 idx, i;
+
+ for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
+ dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ idx = rss->rss_ind_tbl[i].cq_idx;
+ if (idx > ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+
+ if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
+ return ENA_COM_INVAL;
+
+ rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
+ }
+
+ return 0;
+}
+
+static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ size_t size;
+
+ size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
+
+ ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ if (!ena_dev->intr_moder_tbl)
+ return ENA_COM_NO_MEM;
+
+ ena_com_config_default_interrupt_moderation_table(ena_dev);
+
+ return 0;
+}
+
+static void
+ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ unsigned int intr_delay_resolution)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int i;
+
+ if (!intr_delay_resolution) {
+ ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+ intr_delay_resolution = 1;
+ }
+ ena_dev->intr_delay_resolution = intr_delay_resolution;
+
+ /* update Rx */
+ for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
+ intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+
+ /* update Tx */
+ ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+}
+
+/*****************************************************************************/
+/******************************* API ******************************/
+/*****************************************************************************/
+
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size)
+{
+ struct ena_comp_ctx *comp_ctx;
+ int ret = 0;
+
+ comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
+ comp, comp_size);
+ if (unlikely(IS_ERR(comp_ctx))) {
+ ena_trc_err("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+ return PTR_ERR(comp_ctx);
+ }
+
+ ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
+ if (unlikely(ret)) {
+ if (admin_queue->running_state)
+ ena_trc_err("Failed to process command. ret = %d\n",
+ ret);
+ else
+ ena_trc_dbg("Failed to process command. ret = %d\n",
+ ret);
+ }
+ return ret;
+}
+
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_cq_cmd create_cmd;
+ struct ena_admin_acq_create_cq_resp_desc cmd_completion;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
+
+ create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ create_cmd.cq_caps_1 |=
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+
+ create_cmd.msix_vector = io_cq->msix_vector;
+ create_cmd.cq_depth = io_cq->q_depth;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.cq_ba,
+ io_cq->cdesc_addr.phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_execute_admin_command(
+ admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_cq->idx = cmd_completion.cq_idx;
+ io_cq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_doorbell_offset);
+
+ if (io_cq->q_depth != cmd_completion.cq_actual_depth) {
+ ena_trc_err("completion actual queue size (%d) is differ from requested size (%d)\n",
+ cmd_completion.cq_actual_depth, io_cq->q_depth);
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+ return ENA_COM_NO_SPACE;
+ }
+
+ io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_interrupt_unmask_register);
+
+ if (cmd_completion.cq_head_db_offset)
+ io_cq->cq_head_db_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_head_db_offset);
+
+ ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+ return ret;
+}
+
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq)
+{
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Invalid queue number %d but the max is %d\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return ENA_COM_INVAL;
+ }
+
+ *io_sq = &ena_dev->io_sq_queues[qid];
+ *io_cq = &ena_dev->io_cq_queues[qid];
+
+ return 0;
+}
+
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ if (!admin_queue->comp_ctx)
+ return;
+
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
+ comp_ctx->status = ENA_CMD_ABORTED;
+
+ ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
+ }
+}
+
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags = 0;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ENA_MSLEEP(20);
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ }
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+}
+
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_cq_cmd));
+
+ destroy_cmd.cq_idx = io_cq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
+
+ ret = ena_com_execute_admin_command(
+ admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
+ ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
+
+ return ret;
+}
+
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->admin_queue.running_state;
+}
+
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags = 0;
+
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_dev->admin_queue.running_state = state;
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+}
+
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
+{
+ u16 depth = ena_dev->aenq.q_depth;
+
+ ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n");
+
+ /* Init head_db to mark that all entries in the queue
+ * are initially available
+ */
+ ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret = 0;
+
+ if (unlikely(!ena_dev)) {
+ ena_trc_err("%s : ena_dev is NULL\n", __func__);
+ return ENA_COM_NO_DEVICE;
+ }
+
+ ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+ if (ret) {
+ ena_trc_info("Can't get aenq configuration\n");
+ return ret;
+ }
+
+ if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
+ ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+ get_resp.u.aenq.supported_groups,
+ groups_flag);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
+ cmd.u.aenq.enabled_groups = groups_flag;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to config AENQ ret: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+{
+ u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+ int width;
+
+ if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
+ ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+
+ ena_trc_dbg("ENA dma width: %d\n", width);
+
+ if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
+ ena_trc_err("DMA width illegal value: %d\n", width);
+ return ENA_COM_INVAL;
+ }
+
+ ena_dev->dma_addr_bits = width;
+
+ return width;
+}
+
+int ena_com_validate_version(struct ena_com_dev *ena_dev)
+{
+ u32 ver;
+ u32 ctrl_ver;
+ u32 ctrl_ver_masked;
+
+ /* Make sure the ENA version and the controller version are at least
+ * as high as the driver expects
+ */
+ ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
+ ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+ ENA_REGS_CONTROLLER_VERSION_OFF);
+
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+ (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ ena_trc_info("ena device version: %d.%d\n",
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+
+ if (ver < MIN_ENA_VER) {
+ ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
+ return -1;
+ }
+
+ ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
+ >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
+ >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+
+ ctrl_ver_masked =
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
+
+ /* Validate the ctrl version without the implementation ID */
+ if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
+ ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+
+ if (!admin_queue)
+ return;
+
+ if (admin_queue->comp_ctx)
+ ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
+ admin_queue->comp_ctx = NULL;
+
+ if (admin_queue->sq.entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ ADMIN_SQ_SIZE(admin_queue->q_depth),
+ admin_queue->sq.entries,
+ admin_queue->sq.dma_addr,
+ admin_queue->sq.mem_handle);
+ admin_queue->sq.entries = NULL;
+
+ if (admin_queue->cq.entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ ADMIN_CQ_SIZE(admin_queue->q_depth),
+ admin_queue->cq.entries,
+ admin_queue->cq.dma_addr,
+ admin_queue->cq.mem_handle);
+ admin_queue->cq.entries = NULL;
+
+ if (ena_dev->aenq.entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth),
+ ena_dev->aenq.entries,
+ ena_dev->aenq.dma_addr,
+ ena_dev->aenq.mem_handle);
+ ena_dev->aenq.entries = NULL;
+}
+
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
+{
+ ena_dev->admin_queue.polling = polling;
+}
+
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ ENA_SPINLOCK_INIT(mmio_read->lock);
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr,
+ mmio_read->read_resp_mem_handle);
+ if (unlikely(!mmio_read->read_resp))
+ return ENA_COM_NO_MEM;
+
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ mmio_read->read_resp->req_id = 0x0;
+ mmio_read->seq_num = 0x0;
+ mmio_read->readless_supported = true;
+
+ return 0;
+}
+
+void
+ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ mmio_read->readless_supported = readless_supported;
+}
+
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_MMIO_RESP_HI_OFF);
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr,
+ mmio_read->read_resp_mem_handle);
+
+ mmio_read->read_resp = NULL;
+}
+
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ u32 addr_low, addr_high;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
+
+ ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_MMIO_RESP_HI_OFF);
+}
+
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
+ int ret;
+
+ dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
+ ena_trc_err("Reg read timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+ ena_trc_err("Device isn't ready, abort com init\n");
+ return -1;
+ }
+
+ admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
+
+ admin_queue->q_dmadev = ena_dev->dmadev;
+ admin_queue->polling = false;
+ admin_queue->curr_cmd_id = 0;
+
+ ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
+
+ if (init_spinlock)
+ ENA_SPINLOCK_INIT(admin_queue->q_lock);
+
+ ret = ena_com_init_comp_ctxt(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_sq(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_cq(admin_queue);
+ if (ret)
+ goto error;
+
+ admin_queue->sq.db_addr = (u32 __iomem *)
+ ((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
+
+ ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_AQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_AQ_BASE_HI_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
+
+ ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_ACQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_ACQ_BASE_HI_OFF);
+
+ aq_caps = 0;
+ aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
+ aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+
+ acq_caps = 0;
+ acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
+ acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
+
+ ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_AQ_CAPS_OFF);
+ ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_ACQ_CAPS_OFF);
+ ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
+ if (ret)
+ goto error;
+
+ admin_queue->running_state = true;
+
+ return 0;
+error:
+ ena_com_admin_destroy(ena_dev);
+
+ return ret;
+}
+
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ u16 qid,
+ enum queue_direction direction,
+ enum ena_admin_placement_policy_type mem_queue_type,
+ u32 msix_vector,
+ u16 queue_size)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+ int ret = 0;
+
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return ENA_COM_INVAL;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+ io_cq = &ena_dev->io_cq_queues[qid];
+
+ memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
+ memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
+
+ /* Init CQ */
+ io_cq->q_depth = queue_size;
+ io_cq->direction = direction;
+ io_cq->qid = qid;
+
+ io_cq->msix_vector = msix_vector;
+
+ io_sq->q_depth = queue_size;
+ io_sq->direction = direction;
+ io_sq->qid = qid;
+
+ io_sq->mem_queue_type = mem_queue_type;
+
+ if (direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ /* header length is limited to 8 bits */
+ io_sq->tx_max_header_size =
+ ENA_MIN16(ena_dev->tx_max_header_size, SZ_256);
+
+ ret = ena_com_init_io_sq(ena_dev, io_sq);
+ if (ret)
+ goto error;
+ ret = ena_com_init_io_cq(ena_dev, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_cq(ena_dev, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
+ if (ret)
+ goto destroy_io_cq;
+
+ return 0;
+
+destroy_io_cq:
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+error:
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+ return ret;
+}
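For context, the ethdev layer drives the routine above once per queue and then fetches the resulting SQ/CQ handles. An illustrative sketch (variable names such as qid, msix_vector and queue_size are assumptions, not part of the patch):

    rc = ena_com_create_io_queue(ena_dev, qid,
                                 ENA_COM_IO_QUEUE_DIRECTION_TX,
                                 ENA_ADMIN_PLACEMENT_POLICY_HOST,
                                 msix_vector, queue_size);
    if (rc == 0)
            rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);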
+
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+ io_cq = &ena_dev->io_cq_queues[qid];
+
+ ena_com_destroy_io_sq(ena_dev, io_sq);
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+}
+
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp)
+{
+ return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+}
+
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_DEVICE_ATTRIBUTES);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
+ sizeof(get_resp.u.dev_attr));
+ ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_MAX_QUEUES_NUM);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+ sizeof(get_resp.u.max_queue));
+ ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_AENQ_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
+ sizeof(get_resp.u.aenq));
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
+ sizeof(get_resp.u.offload));
+
+ return 0;
+}
+
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
+{
+ ena_com_handle_admin_completion(&ena_dev->admin_queue);
+}
+
+/* ena_com_get_specific_aenq_cb:
+ * return the handler that is relevant to the specific event group
+ */
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+ u16 group)
+{
+ struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+
+ if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
+ return aenq_handlers->handlers[group];
+
+ return aenq_handlers->unimplemented_handler;
+}
+
+/* ena_com_aenq_intr_handler:
+ * handles incoming AENQ events:
+ * pop events from the queue and apply the matching group handler
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+{
+ struct ena_admin_aenq_entry *aenq_e;
+ struct ena_admin_aenq_common_desc *aenq_common;
+ struct ena_com_aenq *aenq = &dev->aenq;
+ ena_aenq_handler handler_cb;
+ u16 masked_head, processed = 0;
+ u8 phase;
+
+ masked_head = aenq->head & (aenq->q_depth - 1);
+ phase = aenq->phase;
+ aenq_e = &aenq->entries[masked_head]; /* Get first entry */
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+ while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
+ phase) {
+ ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
+ aenq_common->group,
+ aenq_common->syndrom,
+ (unsigned long long)aenq_common->timestamp_low +
+ ((u64)aenq_common->timestamp_high << 32));
+
+ /* Handle specific event*/
+ handler_cb = ena_com_get_specific_aenq_cb(dev,
+ aenq_common->group);
+ handler_cb(data, aenq_e); /* call the actual event handler*/
+
+ /* Get next event entry */
+ masked_head++;
+ processed++;
+
+ if (unlikely(masked_head == aenq->q_depth)) {
+ masked_head = 0;
+ phase = !phase;
+ }
+ aenq_e = &aenq->entries[masked_head];
+ aenq_common = &aenq_e->aenq_common_desc;
+ }
+
+ aenq->head += processed;
+ aenq->phase = phase;
+
+ /* Don't update aenq doorbell if there weren't any processed events */
+ if (!processed)
+ return;
+
+ /* write the aenq doorbell after all AENQ descriptors were read */
+ mb();
+ ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar
+ + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
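The lookup in ena_com_get_specific_aenq_cb() assumes the driver registered a struct ena_aenq_handlers (declared in ena_com.h, not shown in this diff) when calling ena_com_admin_init(). An illustrative sketch of such a registration; the callback signature and the ENA_ADMIN_LINK_CHANGE group id are assumed to come from the base headers, and the handler names here are hypothetical:

    static void link_change_handler(void *data, struct ena_admin_aenq_entry *aenq_e)
    {
            /* driver-specific reaction to a link change event */
    }

    static void unimplemented_handler(void *data, struct ena_admin_aenq_entry *aenq_e)
    {
            /* events from groups without a dedicated handler end up here */
    }

    static struct ena_aenq_handlers aenq_handlers = {
            .handlers = {
                    [ENA_ADMIN_LINK_CHANGE] = link_change_handler,
            },
            .unimplemented_handler = unimplemented_handler,
    };

    /* later: ena_com_admin_init(ena_dev, &aenq_handlers, true); */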
+
+/* Sets the function Idx and Queue Idx to be used for
+ * the get-full-statistics feature
+ */
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+ u32 func_queue)
+{
+ /* Function & Queue are acquired from the user in the following format:
+ * Bottom half word: funct
+ * Top half word: queue
+ */
+ ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
+ ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
+
+ return 0;
+}
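Given the packing described in the comment above and the ENA_EXTENDED_STAT_GET_FUNCT/ENA_EXTENDED_STAT_GET_QUEUE macros at the top of this file, a caller would build the argument as follows (illustrative sketch; func_idx and queue_idx are hypothetical names):

    /* queue index in the top 16 bits, function index in the bottom 16 bits */
    u32 func_queue = ((u32)queue_idx << 16) | (func_idx & 0xFFFF);
    ena_com_extended_stats_set_func_queue(ena_dev, func_queue);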
+
+int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+{
+ u32 stat, timeout, cap, reset_val;
+ int rc;
+
+ stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+ (cap == ENA_MMIO_READ_TIMEOUT))) {
+ ena_trc_err("Reg read32 timeout occurred\n");
+ return ENA_COM_TIMER_EXPIRED;
+ }
+
+ if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+ ena_trc_err("Device isn't ready, can't reset device\n");
+ return ENA_COM_INVAL;
+ }
+
+ timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
+ ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+ if (timeout == 0) {
+ ena_trc_err("Invalid timeout value\n");
+ return ENA_COM_INVAL;
+ }
+
+ /* start reset */
+ reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_DEV_CTL_OFF);
+
+ /* Write again the MMIO read request address */
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ rc = wait_for_reset_state(ena_dev, timeout,
+ ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ if (rc != 0) {
+ ena_trc_err("Reset indication didn't turn on\n");
+ return rc;
+ }
+
+ /* reset done */
+ ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar
+ + ENA_REGS_DEV_CTL_OFF);
+ rc = wait_for_reset_state(ena_dev, timeout, 0);
+ if (rc != 0) {
+ ena_trc_err("Reset indication didn't turn off\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_aq_get_stats_cmd *get_cmd,
+ struct ena_admin_acq_get_stats_resp *get_resp,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_com_admin_queue *admin_queue;
+ int ret = 0;
+
+ if (!ena_dev) {
+ ena_trc_err("%s : ena_dev is NULL\n", __func__);
+ return ENA_COM_NO_DEVICE;
+ }
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(
+ admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats)
+{
+ int ret = 0;
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+
+ memset(&get_cmd, 0x0, sizeof(get_cmd));
+ ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
+ ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ if (likely(ret == 0))
+ memcpy(stats, &get_resp.basic_stats,
+ sizeof(get_resp.basic_stats));
+
+ return ret;
+}
+
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+ u32 len)
+{
+ int ret = 0;
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+ ena_mem_handle_t mem_handle = 0;
+ void *virt_addr;
+ dma_addr_t phys_addr;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
+ virt_addr, phys_addr, mem_handle);
+ if (!virt_addr) {
+ ret = ENA_COM_NO_MEM;
+ goto done;
+ }
+ memset(&get_cmd, 0x0, sizeof(get_cmd));
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd.u.control_buffer.address,
+ phys_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+		goto free_ext_stats_mem;
+ }
+ get_cmd.u.control_buffer.length = len;
+
+ get_cmd.device_id = ena_dev->stats_func;
+ get_cmd.queue_idx = ena_dev->stats_queue;
+
+ ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
+ if (ret < 0)
+ goto free_ext_stats_mem;
+
+ ret = snprintf(buff, len, "%s", (char *)virt_addr);
+
+free_ext_stats_mem:
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
+ mem_handle);
+done:
+ return ret;
+}
+
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret = 0;
+
+ if (unlikely(!ena_dev)) {
+ ena_trc_err("%s : ena_dev is NULL\n", __func__);
+ return ENA_COM_NO_DEVICE;
+ }
+
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+ ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_MTU;
+ cmd.u.mtu.mtu = mtu;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+ return ENA_COM_INVAL;
+ }
+ return 0;
+}
+
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload)
+{
+ int ret;
+ struct ena_admin_get_feat_resp resp;
+
+ ret = ena_com_get_feature(ena_dev, &resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to get offload capabilities %d\n", ret);
+ return ENA_COM_INVAL;
+ }
+
+ memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
+
+ return 0;
+}
+
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ ena_trc_info("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return ENA_COM_PERMISSION;
+ }
+
+ /* Validate hash function is supported */
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ if (unlikely(ret))
+ return ret;
+
+ if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
+ ena_trc_err("Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
+ cmd.u.flow_hash_func.init_val = rss->hash_init_val;
+ cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_key_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = sizeof(*rss->hash_key);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
+ return ENA_COM_INVAL;
+ }
+
+ return 0;
+}
+
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+	/* Make sure the key size is a multiple of DWORDs */
+ if (unlikely(key_len & 0x3))
+ return ENA_COM_INVAL;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+ ena_trc_err("Flow hash function %d isn't supported\n", func);
+ return ENA_COM_PERMISSION;
+ }
+
+ switch (func) {
+ case ENA_ADMIN_TOEPLITZ:
+ if (key_len > sizeof(hash_key->key)) {
+ ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return ENA_COM_INVAL;
+ }
+
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
+ break;
+ case ENA_ADMIN_CRC32:
+ rss->hash_init_val = init_val;
+ break;
+ default:
+ ena_trc_err("Invalid hash function (%d)\n", func);
+ return ENA_COM_INVAL;
+ }
+
+ rc = ena_com_set_hash_function(ena_dev);
+
+ /* Restore the old function */
+ if (unlikely(rc))
+ ena_com_get_hash_function(ena_dev, NULL, NULL);
+
+ return rc;
+}
+
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func;
+ if (func)
+ *func = rss->hash_func;
+
+ if (key)
+ memcpy(key, hash_key->key, hash_key->keys_num << 2);
+
+ return 0;
+}
+
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_INPUT,
+ rss->hash_ctrl_dma_addr,
+ sizeof(*rss->hash_ctrl));
+ if (unlikely(rc))
+ return rc;
+
+ if (fields)
+ *fields = rss->hash_ctrl->selected_fields[proto].fields;
+
+ return 0;
+}
+
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_INPUT)) {
+ ena_trc_info("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
+ return ENA_COM_PERMISSION;
+ }
+
+	memset(&cmd, 0x0, sizeof(cmd));
+
+	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
+ cmd.u.flow_hash_input.enabled_input_sort =
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_ctrl_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+ cmd.control_buffer.length = sizeof(*hash_ctrl);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to set hash input. error: %d\n", ret);
+ ret = ENA_COM_INVAL;
+ }
+
+	return ret;
+}
+
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl =
+ rss->hash_ctrl;
+ u16 available_fields = 0;
+ int rc, i;
+
+ /* Get the supported hash input */
+ rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
+		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
+
+ for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
+ available_fields = hash_ctrl->selected_fields[i].fields &
+ hash_ctrl->supported_fields[i].fields;
+ if (available_fields != hash_ctrl->selected_fields[i].fields) {
+ ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ i, hash_ctrl->supported_fields[i].fields,
+ hash_ctrl->selected_fields[i].fields);
+ return ENA_COM_PERMISSION;
+ }
+ }
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+
+ return rc;
+}
+
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ u16 supported_fields;
+ int rc;
+
+	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+ ena_trc_err("Invalid proto num (%u)\n", proto);
+ return ENA_COM_INVAL;
+ }
+
+ /* Get the ctrl table */
+ rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ /* Make sure all the fields are supported */
+ supported_fields = hash_ctrl->supported_fields[proto].fields;
+ if ((hash_fields & supported_fields) != hash_fields) {
+ ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+ proto, hash_fields, supported_fields);
+ }
+
+ hash_ctrl->selected_fields[proto].fields = hash_fields;
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+
+	return rc;
+}
+
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
+ return ENA_COM_INVAL;
+
+ if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
+ return ENA_COM_INVAL;
+
+ rss->host_rss_ind_tbl[entry_idx] = entry_value;
+
+ return 0;
+}
+
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret = 0;
+
+ if (!ena_com_check_supported_feature_id(
+ ena_dev,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_trc_info("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ return ENA_COM_PERMISSION;
+ }
+
+ ret = ena_com_ind_tbl_convert_to_device(ena_dev);
+ if (ret) {
+ ena_trc_err("Failed to convert host indirection table to device table\n");
+ return ret;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.u.ind_table.size = rss->tbl_log_size;
+ cmd.u.ind_table.inline_index = 0xFFFFFFFF;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->rss_ind_tbl_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = (1 << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret)) {
+ ena_trc_err("Failed to set indirect table. error: %d\n", ret);
+ return ENA_COM_INVAL;
+ }
+
+ return 0;
+}
+
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ u32 tbl_size;
+ int i, rc;
+
+ tbl_size = (1 << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ rss->rss_ind_tbl_dma_addr,
+ tbl_size);
+ if (unlikely(rc))
+ return rc;
+
+ if (!ind_tbl)
+ return 0;
+
+ rc = ena_com_ind_tbl_convert_from_device(ena_dev);
+ if (unlikely(rc))
+ return rc;
+
+ for (i = 0; i < (1 << rss->tbl_log_size); i++)
+ ind_tbl[i] = rss->host_rss_ind_tbl[i];
+
+ return 0;
+}
+
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+{
+ int rc;
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+
+ rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
+ if (unlikely(rc))
+ goto err_indr_tbl;
+
+ rc = ena_com_hash_key_allocate(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_key;
+
+ rc = ena_com_hash_ctrl_init(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_ctrl;
+
+ return 0;
+
+err_hash_ctrl:
+ ena_com_hash_key_destroy(ena_dev);
+err_hash_key:
+ ena_com_indirect_table_destroy(ena_dev);
+err_indr_tbl:
+
+ return rc;
+}
+
+int ena_com_rss_destroy(struct ena_com_dev *ena_dev)
+{
+ ena_com_indirect_table_destroy(ena_dev);
+ ena_com_hash_key_destroy(ena_dev);
+ ena_com_hash_ctrl_destroy(ena_dev);
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+
+ return 0;
+}
+
+int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev,
+ u32 debug_area_size)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+ int rc;
+
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ SZ_4K,
+ host_attr->host_info,
+ host_attr->host_info_dma_addr,
+ host_attr->host_info_dma_handle);
+ if (unlikely(!host_attr->host_info))
+ return ENA_COM_NO_MEM;
+
+ if (debug_area_size) {
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ if (unlikely(!host_attr->debug_area_virt_addr)) {
+ rc = ENA_COM_NO_MEM;
+ goto err;
+ }
+ }
+
+ host_attr->debug_area_size = debug_area_size;
+
+ return 0;
+err:
+
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ SZ_4K,
+ host_attr->host_info,
+ host_attr->host_info_dma_addr,
+ host_attr->host_info_dma_handle);
+ host_attr->host_info = NULL;
+ return rc;
+}
+
+void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->host_info) {
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ SZ_4K,
+ host_attr->host_info,
+ host_attr->host_info_dma_addr,
+ host_attr->host_info_dma_handle);
+ host_attr->host_info = NULL;
+ }
+
+ if (host_attr->debug_area_virt_addr) {
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+ host_attr->debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ host_attr->debug_area_virt_addr = NULL;
+ }
+}
+
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+
+ int ret = 0;
+
+ if (unlikely(!ena_dev)) {
+ ena_trc_err("%s : ena_dev is NULL\n", __func__);
+ return ENA_COM_NO_DEVICE;
+ }
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_HOST_ATTR_CONFIG)) {
+ ena_trc_warn("Set host attribute isn't supported\n");
+ return ENA_COM_PERMISSION;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.debug_ba,
+ host_attr->debug_area_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.os_info_ba,
+ host_attr->host_info_dma_addr);
+ if (unlikely(ret)) {
+ ena_trc_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ ena_trc_err("Failed to set host attributes: %d\n", ret);
+
+ return ret;
+}
+
+/* Interrupt moderation */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(
+ ena_dev,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+}
+
+int
+ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ ena_trc_err("Illegal interrupt delay granularity value\n");
+ return ENA_COM_FAULT;
+ }
+
+ ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
+ ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+int
+ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ ena_trc_err("Illegal interrupt delay granularity value\n");
+ return ENA_COM_FAULT;
+ }
+
+ /* We use LOWEST entry of moderation table for storing
+ * nonadaptive interrupt coalescing values
+ */
+ ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ rx_coalesce_usecs / ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ if (ena_dev->intr_moder_tbl)
+ ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
+ ena_dev->intr_moder_tbl = NULL;
+}
+
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ u32 delay_resolution;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+
+ if (rc) {
+ if (rc == ENA_COM_PERMISSION) {
+ ena_trc_info("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
+ rc = 0;
+ } else {
+ ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
+ rc);
+ }
+
+ /* no moderation supported, disable adaptive support */
+ ena_com_disable_adaptive_moderation(ena_dev);
+ return rc;
+ }
+
+ rc = ena_com_init_interrupt_moderation_table(ena_dev);
+ if (rc)
+ goto err;
+
+ /* if moderation is supported by device we set adaptive moderation */
+ delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
+ ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
+ ena_com_enable_adaptive_moderation(ena_dev);
+
+ return 0;
+err:
+ ena_com_destroy_interrupt_moderation(ena_dev);
+ return rc;
+}
+
+void
+ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (!intr_moder_tbl)
+ return;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ ENA_INTR_LOWEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
+ ENA_INTR_LOWEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
+ ENA_INTR_LOWEST_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
+ ENA_INTR_LOW_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
+ ENA_INTR_LOW_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
+ ENA_INTR_LOW_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
+ ENA_INTR_MID_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
+ ENA_INTR_MID_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
+ ENA_INTR_MID_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
+ ENA_INTR_HIGH_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
+ ENA_INTR_HIGH_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
+ ENA_INTR_HIGH_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
+ ENA_INTR_HIGHEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
+ ENA_INTR_HIGHEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
+ ENA_INTR_HIGHEST_BYTES;
+}
+
+unsigned int
+ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->intr_moder_tx_interval;
+}
+
+unsigned int
+ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (intr_moder_tbl)
+ return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
+
+ return 0;
+}
+
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ intr_moder_tbl[level].intr_moder_interval /=
+ ena_dev->intr_delay_resolution;
+ intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
+ intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+}
+
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
+ entry->pkts_per_interval =
+ intr_moder_tbl[level].pkts_per_interval;
+ entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+}
diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h
new file mode 100644
index 00000000..19e53ffb
--- /dev/null
+++ b/drivers/net/ena/base/ena_com.h
@@ -0,0 +1,1052 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef ENA_COM
+#define ENA_COM
+
+#include "ena_plat.h"
+#include "ena_common_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
+#include "ena_regs_defs.h"
+#if defined(__linux__) && !defined(__KERNEL__)
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+#define __iomem
+#endif
+
+#define ENA_MAX_NUM_IO_QUEUES 128U
+/* We need two queues for each IO queue (one for Tx and one for Rx) */
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+
+#define ENA_MAX_HANDLERS 256
+
+#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
+
+/* Unit in usec */
+#define ENA_REG_READ_TIMEOUT 200000
+
+#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry))
+#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
+#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* ENA adaptive interrupt moderation settings */
+
+#define ENA_INTR_LOWEST_USECS (0)
+#define ENA_INTR_LOWEST_PKTS (3)
+#define ENA_INTR_LOWEST_BYTES (2 * 1524)
+
+#define ENA_INTR_LOW_USECS (32)
+#define ENA_INTR_LOW_PKTS (12)
+#define ENA_INTR_LOW_BYTES (16 * 1024)
+
+#define ENA_INTR_MID_USECS (80)
+#define ENA_INTR_MID_PKTS (48)
+#define ENA_INTR_MID_BYTES (64 * 1024)
+
+#define ENA_INTR_HIGH_USECS (128)
+#define ENA_INTR_HIGH_PKTS (96)
+#define ENA_INTR_HIGH_BYTES (128 * 1024)
+
+#define ENA_INTR_HIGHEST_USECS (192)
+#define ENA_INTR_HIGHEST_PKTS (128)
+#define ENA_INTR_HIGHEST_BYTES (192 * 1024)
+
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4
+#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
+#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
+
+enum ena_intr_moder_level {
+ ENA_INTR_MODER_LOWEST = 0,
+ ENA_INTR_MODER_LOW,
+ ENA_INTR_MODER_MID,
+ ENA_INTR_MODER_HIGH,
+ ENA_INTR_MODER_HIGHEST,
+ ENA_INTR_MAX_NUM_OF_LEVELS,
+};
+
+struct ena_intr_moder_entry {
+ unsigned int intr_moder_interval;
+ unsigned int pkts_per_interval;
+ unsigned int bytes_per_interval;
+};
+
+enum queue_direction {
+ ENA_COM_IO_QUEUE_DIRECTION_TX,
+ ENA_COM_IO_QUEUE_DIRECTION_RX
+};
+
+struct ena_com_buf {
+ dma_addr_t paddr; /**< Buffer physical address */
+ u16 len; /**< Buffer length in bytes */
+};
+
+struct ena_com_rx_buf_info {
+ u16 len;
+ u16 req_id;
+};
+
+struct ena_com_io_desc_addr {
+ void __iomem *pbuf_dev_addr; /* LLQ address */
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ ena_mem_handle_t mem_handle;
+};
+
+struct ena_com_tx_meta {
+ u16 mss;
+ u16 l3_hdr_len;
+ u16 l3_hdr_offset;
+ u16 l3_outer_hdr_len; /* In words */
+ u16 l3_outer_hdr_offset;
+ u16 l4_hdr_len; /* In words */
+};
+
+struct ena_com_io_cq {
+ struct ena_com_io_desc_addr cdesc_addr;
+
+ u32 __iomem *db_addr;
+
+ /* Interrupt unmask register */
+ u32 __iomem *unmask_reg;
+
+ /* The completion queue head doorbell register */
+ uint32_t __iomem *cq_head_db_reg;
+
+ /* The value to write to the above register to unmask
+ * the interrupt of this queue
+ */
+ u32 msix_vector;
+
+ enum queue_direction direction;
+
+ /* holds the number of cdesc of the current packet */
+ u16 cur_rx_pkt_cdesc_count;
+	/* save the first cdesc idx of the current packet */
+ u16 cur_rx_pkt_cdesc_start_idx;
+
+ u16 q_depth;
+ /* Caller qid */
+ u16 qid;
+
+ /* Device queue index */
+ u16 idx;
+ u16 head;
+ u16 last_head_update;
+ u8 phase;
+ u8 cdesc_entry_size_in_bytes;
+
+} ____cacheline_aligned;
+
+struct ena_com_io_sq {
+ struct ena_com_io_desc_addr desc_addr;
+
+ u32 __iomem *db_addr;
+ u8 __iomem *header_addr;
+
+ enum queue_direction direction;
+ enum ena_admin_placement_policy_type mem_queue_type;
+
+ u32 msix_vector;
+ struct ena_com_tx_meta cached_tx_meta;
+
+ u16 q_depth;
+ u16 qid;
+
+ u16 idx;
+ u16 tail;
+ u16 next_to_comp;
+ u16 tx_max_header_size;
+ u8 phase;
+ u8 desc_entry_size;
+ u8 dma_addr_bits;
+} ____cacheline_aligned;
+
+struct ena_com_admin_cq {
+ struct ena_admin_acq_entry *entries;
+ ena_mem_handle_t mem_handle;
+ dma_addr_t dma_addr;
+
+ u16 head;
+ u8 phase;
+};
+
+struct ena_com_admin_sq {
+ struct ena_admin_aq_entry *entries;
+ ena_mem_handle_t mem_handle;
+ dma_addr_t dma_addr;
+
+ u32 __iomem *db_addr;
+
+ u16 head;
+ u16 tail;
+ u8 phase;
+
+};
+
+struct ena_com_stats_admin {
+ u32 aborted_cmd;
+ u32 submitted_cmd;
+ u32 completed_cmd;
+ u32 out_of_space;
+ u32 no_completion;
+};
+
+struct ena_com_admin_queue {
+ void *q_dmadev;
+ ena_spinlock_t q_lock; /* spinlock for the admin queue */
+ struct ena_comp_ctx *comp_ctx;
+ u16 q_depth;
+ struct ena_com_admin_cq cq;
+ struct ena_com_admin_sq sq;
+
+ /* Indicate if the admin queue should poll for completion */
+ bool polling;
+
+ u16 curr_cmd_id;
+
+ /* Indicate that the ena was initialized and can
+ * process new admin commands
+ */
+ bool running_state;
+
+ /* Count the number of outstanding admin commands */
+ ena_atomic32_t outstanding_cmds;
+
+ struct ena_com_stats_admin stats;
+};
+
+struct ena_aenq_handlers;
+
+struct ena_com_aenq {
+ u16 head;
+ u8 phase;
+ struct ena_admin_aenq_entry *entries;
+ dma_addr_t dma_addr;
+ ena_mem_handle_t mem_handle;
+ u16 q_depth;
+ struct ena_aenq_handlers *aenq_handlers;
+};
+
+struct ena_com_mmio_read {
+ struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
+ dma_addr_t read_resp_dma_addr;
+ ena_mem_handle_t read_resp_mem_handle;
+ u16 seq_num;
+ bool readless_supported;
+ /* spin lock to ensure a single outstanding read */
+ ena_spinlock_t lock;
+};
+
+struct ena_rss {
+ /* Indirect table */
+ u16 *host_rss_ind_tbl;
+ struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
+ dma_addr_t rss_ind_tbl_dma_addr;
+ ena_mem_handle_t rss_ind_tbl_mem_handle;
+ u16 tbl_log_size;
+
+ /* Hash key */
+ enum ena_admin_hash_functions hash_func;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
+ dma_addr_t hash_key_dma_addr;
+ ena_mem_handle_t hash_key_mem_handle;
+ u32 hash_init_val;
+
+ /* Flow Control */
+ struct ena_admin_feature_rss_hash_control *hash_ctrl;
+ dma_addr_t hash_ctrl_dma_addr;
+ ena_mem_handle_t hash_ctrl_mem_handle;
+
+};
+
+struct ena_host_attribute {
+ /* Debug area */
+ u8 *debug_area_virt_addr;
+ dma_addr_t debug_area_dma_addr;
+ ena_mem_handle_t debug_area_dma_handle;
+ u32 debug_area_size;
+
+ /* Host information */
+ struct ena_admin_host_info *host_info;
+ dma_addr_t host_info_dma_addr;
+ ena_mem_handle_t host_info_dma_handle;
+};
+
+/* Each ena_dev is a PCI function. */
+struct ena_com_dev {
+ struct ena_com_admin_queue admin_queue;
+ struct ena_com_aenq aenq;
+ struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
+ struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
+ void __iomem *reg_bar;
+ void __iomem *mem_bar;
+ void *dmadev;
+
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+
+ u16 stats_func; /* Selected function for extended statistic dump */
+ u16 stats_queue; /* Selected queue for extended statistic dump */
+
+ u16 tx_max_header_size;
+
+ struct ena_com_mmio_read mmio_read;
+
+ struct ena_rss rss;
+ u32 supported_features;
+ u32 dma_addr_bits;
+
+ struct ena_host_attribute host_attr;
+ bool adaptive_coalescing;
+ u16 intr_delay_resolution;
+ u32 intr_moder_tx_interval;
+ struct ena_intr_moder_entry *intr_moder_tbl;
+};
+
+struct ena_com_dev_get_features_ctx {
+ struct ena_admin_queue_feature_desc max_queues;
+ struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_aenq_desc aenq;
+ struct ena_admin_feature_offload_desc offload;
+};
+
+typedef void (*ena_aenq_handler)(void *data,
+ struct ena_admin_aenq_entry *aenq_e);
+
+/* Holds aenq handlers. Indexed by AENQ event group */
+struct ena_aenq_handlers {
+ ena_aenq_handler handlers[ENA_MAX_HANDLERS];
+ ena_aenq_handler unimplemented_handler;
+};
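+
+/* Illustrative example (editor's sketch, not part of the imported sources):
+ * a driver typically defines a file-scope handler table and passes it to
+ * ena_com_admin_init(); the handler names below are hypothetical:
+ *
+ *	static void unimplemented_aenq_handler(void *data,
+ *					       struct ena_admin_aenq_entry *aenq_e)
+ *	{
+ *	}
+ *
+ *	static struct ena_aenq_handlers aenq_handlers = {
+ *		.handlers = { NULL },
+ *		.unimplemented_handler = unimplemented_aenq_handler,
+ *	};
+ *
+ * Specific slots of .handlers[], indexed by AENQ event group, can then be
+ * filled with driver callbacks.
+ */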
+
+/*****************************************************************************/
+/*****************************************************************************/
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ *
+ * Initialize the register read mechanism.
+ *
+ * @note: This method must be the first stage in the initialization sequence.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ * @readless_supported: readless mode (enable/disable)
+ */
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
+ bool readless_supported);
+
+/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
+ * value physical address.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
+
+/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_init - Init the admin and the async queues
+ * @ena_dev: ENA communication layer struct
+ * @aenq_handlers: handlers to be called upon AENQ events.
+ * @init_spinlock: indicate whether this method should init the admin spinlock
+ * or whether the spinlock was already initialized (for example, in case of FLR).
+ *
+ * Initialize the admin submission and completion queues.
+ * Initialize the asynchronous events notification queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock);
+
+/* ena_com_admin_destroy - Destroy the admin and the async events queues.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @note: Before calling this method, the caller must validate that the device
+ * won't send any additional admin completions/aenq.
+ * To achieve that, a FLR is recommended.
+ */
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_dev_reset - Perform a device reset (FLR) on the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+
+/* ena_com_create_io_queue - Create io queue.
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ * @direction - the queue direction (Rx/Tx)
+ * @mem_queue_type - Indicate if this queue is LLQ or regular queue
+ * (relevant only for Tx queue)
+ * @msix_vector - MSI-X vector
+ * @queue_size - queue size
+ *
+ * Create the submission and the completion queues for queue id - qid.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev, u16 qid,
+ enum queue_direction direction,
+ enum ena_admin_placement_policy_type mem_queue_type,
+ u32 msix_vector,
+ u16 queue_size);
+
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
+
+/* ena_com_get_io_handlers - Return the io queue handlers
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ * @io_sq - IO submission queue handler
+ * @io_cq - IO completion queue handler.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq);
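+
+/* Illustrative IO queue creation sketch (editor's addition, not part of the
+ * imported sources): creating an Rx queue and retrieving its SQ/CQ handlers.
+ * The function name and the queue parameters are hypothetical.
+ */
+static inline int ena_com_example_create_rx_queue(struct ena_com_dev *ena_dev,
+						   u16 qid, u16 queue_size,
+						   u32 msix_vector,
+						   struct ena_com_io_sq **io_sq,
+						   struct ena_com_io_cq **io_cq)
+{
+	int rc;
+
+	rc = ena_com_create_io_queue(ena_dev, qid,
+				     ENA_COM_IO_QUEUE_DIRECTION_RX,
+				     ENA_ADMIN_PLACEMENT_POLICY_HOST,
+				     msix_vector, queue_size);
+	if (rc)
+		return rc;
+
+	/* Fetch the submission/completion queue handlers for the datapath */
+	rc = ena_com_get_io_handlers(ena_dev, qid, io_sq, io_cq);
+	if (rc)
+		ena_com_destroy_io_queue(ena_dev, qid);
+
+	return rc;
+}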
+
+/* ena_com_admin_aenq_enable - Enable asynchronous event notifications
+ * @ena_dev: ENA communication layer struct
+ *
+ * After this method, aenq event can be received via AENQ.
+ */
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_running_state - Set the state of the admin queue
+ * @ena_dev: ENA communication layer struct
+ *
+ * Change the state of the admin queue (enable/disable)
+ */
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
+
+/* ena_com_get_admin_running_state - Get the admin queue state
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the state of the admin queue (enable/disable)
+ *
+ * @return - current admin queue running state (enable/disable)
+ */
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/Disable polling mode
+ *
+ * Set the admin completion mode.
+ */
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
+
+/* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ *
+ * Get the admin completion mode.
+ * If polling mode is on, ena_com_execute_admin_command will perform a
+ * polling on the admin completion queue for the commands completion,
+ * otherwise it will wait on wait event.
+ *
+ * @return state
+ */
+bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the admin completion queue and wakes up all the
+ * pending threads that wait on the commands wait event.
+ *
+ * @note: Should be called after MSI-X interrupt.
+ */
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
+
+/* ena_com_aenq_intr_handler - AENQ interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the async event notification queue and calls the
+ * proper aenq handler.
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+
+/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method aborts all the outstanding admin commands.
+ * The caller should then call ena_com_wait_for_abort_completion to make sure
+ * all the commands were completed.
+ */
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
+
+/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method waits until all the outstanding admin commands have completed.
+ */
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
+
+/* ena_com_validate_version - Validate the device parameters
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method validates that the device parameters are the same as the saved
+ * parameters in ena_dev.
+ * This method is useful after device reset, to validate that the device mac
+ * address and the device offloads are the same as before the reset.
+ *
+ * @return - 0 on success negative value otherwise.
+ */
+int ena_com_validate_version(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_link_params - Retrieve physical link parameters.
+ * @ena_dev: ENA communication layer struct
+ * @resp: Link parameters
+ *
+ * Retrieve the physical link parameters,
+ * like speed, auto-negotiation and full duplex support.
+ *
+ * @return - 0 on Success negative value otherwise.
+ */
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp);
+
+/* ena_com_get_dma_width - Retrieve physical dma address width the device
+ * supports.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the maximum physical address bits the device can handle.
+ *
+ * @return: > 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_aenq_config - Set aenq groups configurations
+ * @ena_dev: ENA communication layer struct
+ * @groups_flag: bit field of enum ena_admin_aenq_group values.
+ *
+ * Configure which aenq event group the driver would like to receive.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
+
+/* ena_com_get_dev_attr_feat - Get device features
+ * @ena_dev: ENA communication layer struct
+ * @get_feat_ctx: returned context that contains the get features.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int
+ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
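+
+/* Illustrative bring-up sketch (editor's addition, not part of the imported
+ * sources): one possible ordering of the initialization calls declared above.
+ * Error handling is simplified and the function name is hypothetical.
+ */
+static inline int ena_com_example_bring_up(struct ena_com_dev *ena_dev,
+					   struct ena_aenq_handlers *aenq_handlers)
+{
+	struct ena_com_dev_get_features_ctx get_feat_ctx;
+	int rc;
+
+	/* The mmio read mechanism must be ready before any register access */
+	rc = ena_com_mmio_reg_read_request_init(ena_dev);
+	if (rc)
+		return rc;
+
+	/* Bring the device to a known state */
+	rc = ena_com_dev_reset(ena_dev);
+	if (rc)
+		goto err_mmio_read;
+
+	/* Set up the admin queue, the AENQ and their handlers */
+	rc = ena_com_admin_init(ena_dev, aenq_handlers, true);
+	if (rc)
+		goto err_mmio_read;
+
+	/* Poll for admin completions until interrupts are set up */
+	ena_com_set_admin_polling_mode(ena_dev, true);
+
+	/* Query device attributes and supported features */
+	rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
+	if (rc)
+		goto err_admin_init;
+
+	return 0;
+
+err_admin_init:
+	ena_com_admin_destroy(ena_dev);
+err_mmio_read:
+	ena_com_mmio_reg_read_request_destroy(ena_dev);
+	return rc;
+}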
+
+/* ena_com_get_dev_basic_stats - Get device basic statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats);
+
+/* ena_com_set_dev_mtu - Configure the device mtu.
+ * @ena_dev: ENA communication layer struct
+ * @mtu: mtu value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+
+/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
+ * @ena_dev: ENA communication layer struct
+ * @offload: offload return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int
+ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload);
+
+/* ena_com_rss_init - Init RSS
+ * @ena_dev: ENA communication layer struct
+ * @log_size: indirection log size
+ *
+ * Allocate RSS/RFS resources.
+ * The caller then can configure rss using ena_com_set_hash_function,
+ * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
+
+/* ena_com_rss_destroy - Destroy rss
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free all the RSS/RFS resources.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_fill_hash_function - Fill RSS hash function
+ * @ena_dev: ENA communication layer struct
+ * @func: The hash function (Toeplitz or crc)
+ * @key: Hash key (for toeplitz hash)
+ * @key_len: key length (max length 10 DW)
+ * @init_val: initial value for the hash function
+ *
+ * Fill the ena_dev resources with the desired hash function, hash key, key_len
+ * and key initial value (if needed by the hash function).
+ * To flush the key into the device the caller should call
+ * ena_com_set_hash_function.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val);
+
+/* ena_com_set_hash_function - Flush the hash function and its dependencies to
+ * the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash function and its dependencies (key, key length and
+ * initial value) if needed.
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_function
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_hash_function - Retrieve the hash function and the hash key
+ * from the device.
+ * @ena_dev: ENA communication layer struct
+ * @func: hash function
+ * @key: hash key
+ *
+ * Retrieve the hash function and the hash key from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key);
+
+/* ena_com_fill_hash_ctrl - Fill RSS hash control
+ * @ena_dev: ENA communication layer struct.
+ * @proto: The protocol to configure.
+ * @hash_fields: bit mask of ena_admin_flow_hash_fields
+ *
+ * Fill the ena_dev resources with the desired hash control (the ethernet
+ * fields that take part of the hash) for a specific protocol.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields);
+
+/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash control (the ethernet fields that take part of the hash)
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
+ * @ena_dev: ENA communication layer struct
+ * @proto: The protocol to retrieve.
+ * @fields: bit mask of ena_admin_flow_hash_fields.
+ *
+ * Retrieve the hash control from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields);
+
+/* ena_com_set_default_hash_ctrl - Set the hash control to a default
+ * configuration.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Fill the ena_dev resources with the default hash control configuration.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
+ * indirection table
+ * @ena_dev: ENA communication layer struct.
+ * @entry_idx - indirection table entry.
+ * @entry_value - redirection value
+ *
+ * Fill a single entry of the RSS indirection table in the ena_dev resources.
+ * To flush the indirection table to the device, the caller should call
+ * ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value);
+
+/* ena_com_indirect_table_set - Flush the indirection table to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the indirection hash control to the device.
+ * Prior to this method the caller should call ena_com_indirect_table_fill_entry
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
+
+/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
+ * @ena_dev: ENA communication layer struct
+ * @ind_tbl: indirection table
+ *
+ * Retrieve the RSS indirection table from the device.
+ *
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't
+ * flush it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
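+
+/* Illustrative RSS configuration sketch (editor's addition, not part of the
+ * imported sources): one possible way to chain the RSS calls declared above.
+ * The function name, the Toeplitz key parameters and the round-robin mapping
+ * of indirection entries to queues are hypothetical.
+ */
+static inline int ena_com_example_rss_configure(struct ena_com_dev *ena_dev,
+						u16 indr_tbl_log_size,
+						u16 nb_rx_queues,
+						const u8 *toeplitz_key,
+						u16 key_len)
+{
+	u16 i;
+	int rc;
+
+	/* Allocate indirection table, hash key and hash control resources */
+	rc = ena_com_rss_init(ena_dev, indr_tbl_log_size);
+	if (rc)
+		return rc;
+
+	/* Fill and flush the hash function and key */
+	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
+					toeplitz_key, key_len, 0x0);
+	if (rc)
+		goto err;
+
+	/* Fill and flush the default hash input (fields) configuration */
+	rc = ena_com_set_default_hash_ctrl(ena_dev);
+	if (rc)
+		goto err;
+
+	/* Spread the indirection table entries across the Rx queues */
+	for (i = 0; i < (1 << indr_tbl_log_size); i++) {
+		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
+						       i % nb_rx_queues);
+		if (rc)
+			goto err;
+	}
+
+	rc = ena_com_indirect_table_set(ena_dev);
+	if (rc)
+		goto err;
+
+	return 0;
+err:
+	ena_com_rss_destroy(ena_dev);
+	return rc;
+}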
+
+/* ena_com_allocate_host_attribute - Allocate host attributes resources.
+ * @ena_dev: ENA communication layer struct
+ * @debug_area_size: Debug area size
+ *
+ * Allocate host info and debug area.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev,
+ u32 debug_area_size);
+
+/* ena_com_delete_host_attribute - Free the host attributes resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated host info and debug area.
+ */
+void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_host_attributes - Update the device with the host
+ * attributes base address.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
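+
+/* Illustrative usage (editor's sketch, not part of the imported sources):
+ *
+ *	rc = ena_com_allocate_host_attribute(ena_dev, debug_area_size);
+ *	if (rc)
+ *		return rc;
+ *	(fill ena_dev->host_attr.host_info here before flushing it)
+ *	rc = ena_com_set_host_attributes(ena_dev);
+ *	if (rc)
+ *		ena_com_delete_host_attribute(ena_dev);
+ *
+ * The debug_area_size value is chosen by the driver; passing zero skips the
+ * debug area allocation.
+ */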
+
+/* ena_com_create_io_cq - Create io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Create IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_destroy_io_cq - Destroy io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Destroy IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_execute_admin_command - Execute admin command
+ * @admin_queue: admin queue.
+ * @cmd: the admin command to execute.
+ * @cmd_size: the command size.
+ * @cmd_completion: command completion return value.
+ * @cmd_comp_size: command completion size.
+
+ * Submit an admin command and then wait until the device returns a
+ * completion.
+ * The completion will be copied into cmd_comp.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *cmd_comp,
+ size_t cmd_comp_size);
+
+/* ena_com_init_interrupt_moderation - Init interrupt moderation
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
+ * capability is supported by the device.
+ *
+ * @return - supported or not.
+ */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
+ * moderation table back to the default parameters.
+ * @ena_dev: ENA communication layer struct
+ */
+void
+ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+
+/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ * @tx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int
+ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs);
+
+/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ * @rx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int
+ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs);
+
+/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int
+ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int
+ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+
+/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
+ * moderation table.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry value
+ *
+ * Update a single entry in the interrupt moderation table.
+ */
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry to fill.
+ *
+ * Initialize the entry according to the adaptive interrupt moderation table.
+ */
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+static inline bool
+ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->adaptive_coalescing;
+}
+
+static inline void
+ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = true;
+}
+
+static inline void
+ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = false;
+}
+
+/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
+ * @ena_dev: ENA communication layer struct
+ * @pkts: Number of packets since the last update
+ * @bytes: Number of bytes received since the last update.
+ * @smoothed_interval: Returned interval
+ * @moder_tbl_idx: Current table level as input; updated to the new level on
+ * return.
+ */
+static inline void
+ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+ unsigned int pkts,
+ unsigned int bytes,
+ unsigned int *smoothed_interval,
+ unsigned int *moder_tbl_idx)
+{
+ enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
+ struct ena_intr_moder_entry *curr_moder_entry;
+ struct ena_intr_moder_entry *pred_moder_entry;
+ struct ena_intr_moder_entry *new_moder_entry;
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int interval;
+
+ /* We apply adaptive moderation on Rx path only.
+ * Tx uses static interrupt moderation.
+ */
+ if (!pkts || !bytes)
+ /* Tx interrupt, or spurious interrupt,
+ * in both cases we just use same delay values
+ */
+ return;
+
+ curr_moder_idx = (enum ena_intr_moder_level)*moder_tbl_idx;
+ if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
+ ena_trc_err("Wrong moderation index %u\n", curr_moder_idx);
+ return;
+ }
+
+ curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
+ new_moder_idx = curr_moder_idx;
+
+ if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
+ if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval))
+ new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
+ } else {
+ pred_moder_entry = &intr_moder_tbl[curr_moder_idx - 1];
+
+ if ((pkts <= pred_moder_entry->pkts_per_interval) ||
+ (bytes <= pred_moder_entry->bytes_per_interval))
+ new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx - 1);
+ else if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval)) {
+ if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
+ new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
+ }
+ }
+ new_moder_entry = &intr_moder_tbl[new_moder_idx];
+
+ interval = new_moder_entry->intr_moder_interval;
+ *smoothed_interval = (
+ (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
+ ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
+ 10;
+
+ *moder_tbl_idx = new_moder_idx;
+}
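+
+/* Illustrative sketch (not part of the original header): a worked example of
+ * the smoothing formula above, assuming, purely for illustration, a new-value
+ * weight of 4 and an old-value weight of 6. With a new table interval of
+ * 64 usec and a previous smoothed interval of 32 usec:
+ *
+ *	interval = 64;
+ *	smoothed = 32;
+ *	smoothed = (interval * 4 + 6 * smoothed + 5) / 10;
+ *	// (256 + 192 + 5) / 10 = 45 usec, i.e. a 40/60 weighted average
+ *	// of the new and old values, rounded to the nearest usec.
+ */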
+
+/* ena_com_update_intr_reg - Prepare interrupt register
+ * @intr_reg: interrupt register to update.
+ * @rx_delay_interval: Rx interval in usecs
+ * @tx_delay_interval: Tx interval in usecs
+ * @unmask: unmask enable/disable
+ *
+ * Prepare interrupt update register with the supplied parameters.
+ */
+static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
+ u32 rx_delay_interval,
+ u32 tx_delay_interval,
+ bool unmask)
+{
+ intr_reg->intr_control = 0;
+ intr_reg->intr_control |= rx_delay_interval &
+ ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+
+ intr_reg->intr_control |=
+ (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+
+ if (unmask)
+ intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
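+
+/* Usage sketch (illustrative, not part of the original header): preparing an
+ * interrupt register that re-arms the interrupt with a 64 usec Rx delay and
+ * no Tx delay. The variable names below are placeholders.
+ *
+ *	struct ena_eth_io_intr_reg intr_reg;
+ *
+ *	ena_com_update_intr_reg(&intr_reg, 64, 0, true);
+ *	// intr_reg.intr_control now carries both delay fields and the
+ *	// unmask bit, ready to be written to the queue's unmask register.
+ */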
+
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+ u32 len);
+
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+ u32 funct_queue);
+
+#if defined(__cplusplus)
+}
+#endif /* __cplusplus */
+#endif /* !(ENA_COM) */
diff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
new file mode 100644
index 00000000..fe412469
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
@@ -0,0 +1,1979 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+/* admin command opcodes */
+enum ena_admin_aq_opcode {
+ /* create submission queue */
+ ENA_ADMIN_CREATE_SQ = 1,
+
+ /* destroy submission queue */
+ ENA_ADMIN_DESTROY_SQ = 2,
+
+ /* create completion queue */
+ ENA_ADMIN_CREATE_CQ = 3,
+
+ /* destroy completion queue */
+ ENA_ADMIN_DESTROY_CQ = 4,
+
+ /* get capabilities of particular feature */
+ ENA_ADMIN_GET_FEATURE = 8,
+
+ /* set capabilities of particular feature */
+ ENA_ADMIN_SET_FEATURE = 9,
+
+ /* get statistics */
+ ENA_ADMIN_GET_STATS = 11,
+};
+
+/* privileged admin command opcodes */
+enum ena_admin_aq_opcode_privileged {
+ /* get device capabilities */
+ ENA_ADMIN_IDENTIFY = 48,
+
+ /* configure device */
+ ENA_ADMIN_CONFIGURE_PF_DEVICE = 49,
+
+ /* setup SRIOV PCIe Virtual Function capabilities */
+ ENA_ADMIN_SETUP_VF = 50,
+
+ /* load firmware to the controller */
+ ENA_ADMIN_LOAD_FIRMWARE = 52,
+
+ /* commit previously loaded firmware */
+ ENA_ADMIN_COMMIT_FIRMWARE = 53,
+
+ /* quiesce virtual function */
+ ENA_ADMIN_QUIESCE_VF = 54,
+
+ /* load virtual function from migrated context */
+ ENA_ADMIN_MIGRATE_VF = 55,
+};
+
+/* admin command completion status codes */
+enum ena_admin_aq_completion_status {
+ /* Request completed successfully */
+ ENA_ADMIN_SUCCESS = 0,
+
+ /* no resources to satisfy request */
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+
+ /* Bad opcode in request descriptor */
+ ENA_ADMIN_BAD_OPCODE = 2,
+
+ /* Unsupported opcode in request descriptor */
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+
+ /* Wrong request format */
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
+
+ /* One of the parameters is not valid. Provided in the ACQ entry
+ * extended_status
+ */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+
+ /* unexpected error */
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+};
+
+/* get/set feature subcommands opcodes */
+enum ena_admin_aq_feature_id {
+ /* list of all supported attributes/capabilities in the ENA */
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ /* max number of supported queues for every queue type */
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+
+ /* low latency queues capabilities (max entry size, depth) */
+ ENA_ADMIN_LLQ_CONFIG = 3,
+
+ /* power management capabilities */
+ ENA_ADMIN_POWER_MANAGEMENT_CONFIG = 4,
+
+ /* MAC address filters support, multicast, broadcast, and
+ * promiscuous
+ */
+ ENA_ADMIN_MAC_FILTERS_CONFIG = 5,
+
+ /* VLAN membership, frame format, etc. */
+ ENA_ADMIN_VLAN_CONFIG = 6,
+
+ /* Available size for various on-chip memory resources, accessible
+ * by the driver
+ */
+ ENA_ADMIN_ON_DEVICE_MEMORY_CONFIG = 7,
+
+ /* Receive Side Scaling (RSS) function */
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+
+ /* stateless TCP/UDP/IP offload capabilities. */
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+
+ /* Multiple tuples flow table configuration */
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+
+ /* max MTU, current MTU */
+ ENA_ADMIN_MTU = 14,
+
+ /* Receive Side Scaling (RSS) hash input */
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+
+ /* overlay tunnels configuration */
+ ENA_ADMIN_TUNNEL_CONFIG = 19,
+
+ /* interrupt moderation parameters */
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+
+ /* 1588v2 and Timing configuration */
+ ENA_ADMIN_1588_CONFIG = 21,
+
+ /* Packet Header format templates configuration for input and
+ * output parsers
+ */
+ ENA_ADMIN_PKT_HEADER_TEMPLATES_CONFIG = 23,
+
+ /* AENQ configuration */
+ ENA_ADMIN_AENQ_CONFIG = 26,
+
+ /* Link configuration */
+ ENA_ADMIN_LINK_CONFIG = 27,
+
+ /* Host attributes configuration */
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+
+ /* Number of valid opcodes */
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+};
+
+/* descriptors and headers placement */
+enum ena_admin_placement_policy_type {
+ /* descriptors and headers are in OS memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+ /* descriptors and headers in device memory (a.k.a Low Latency
+ * Queue)
+ */
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+};
+
+/* link speeds */
+enum ena_admin_link_types {
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
+};
+
+/* completion queue update policy */
+enum ena_admin_completion_policy_type {
+ /* cqe for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+
+ /* cqe upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+
+ /* current queue head pointer is updated in OS memory upon sq
+ * descriptor request
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+
+ /* current queue head pointer is updated in OS memory for each sq
+ * descriptor
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+};
+
+/* type of get statistics command */
+enum ena_admin_get_stats_type {
+ /* Basic statistics */
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+
+ /* Extended statistics */
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+};
+
+/* scope of get statistics command */
+enum ena_admin_get_stats_scope {
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+
+ ENA_ADMIN_ETH_TRAFFIC = 1,
+};
+
+/* ENA Admin Queue (AQ) common descriptor */
+struct ena_admin_aq_common_desc {
+ /* word 0 : */
+ /* command identifier to associate it with the completion
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command_id;
+
+ /* as appears in ena_aq_opcode */
+ uint8_t opcode;
+
+ /* 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+};
+
+/* used in ena_aq_entry. Can point directly to control data, or to a page
+ * list chunk. Used also at the end of indirect mode page list chunks, for
+ * chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+ /* word 0 : indicates length of the buffer pointed by
+ * control_buffer_address.
+ */
+ uint32_t length;
+
+ /* words 1:2 : points to control buffer (direct or indirect) */
+ struct ena_common_mem_addr address;
+};
+
+/* submission queue full identification */
+struct ena_admin_sq {
+ /* word 0 : */
+ /* queue id */
+ uint16_t sq_idx;
+
+ /* 4:0 : reserved
+ * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved1;
+};
+
+/* AQ entry format */
+struct ena_admin_aq_entry {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1:3 : */
+ union {
+ /* command specific inline data */
+ uint32_t inline_data_w1[3];
+
+ /* words 1:3 : points to control buffer (direct or
+ * indirect, chained if needed)
+ */
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* command specific inline data */
+ uint32_t inline_data_w4[12];
+};
+
+/* ENA Admin Completion Queue (ACQ) common descriptor */
+struct ena_admin_acq_common_desc {
+ /* word 0 : */
+ /* command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command;
+
+ /* status of request execution */
+ uint8_t status;
+
+ /* 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ /* word 1 : */
+ /* provides additional info */
+ uint16_t extended_status;
+
+ /* submission queue head index; serves as a hint as to which AQ
+ * entries can be revoked
+ */
+ uint16_t sq_head_indx;
+};
+
+/* ACQ entry format */
+struct ena_admin_acq_entry {
+ /* words 0:1 : */
+ struct ena_admin_acq_common_desc acq_common_descriptor;
+
+ /* response type specific data */
+ uint32_t response_specific_data[14];
+};
+
+/* ENA AQ Create Submission Queue command. Placed in control buffer pointed
+ * by AQ entry
+ */
+struct ena_admin_aq_create_sq_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* word 1 : */
+ /* 4:0 : reserved0_w1
+ * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved8_w1;
+
+ /* 3:0 : placement_policy - Describes where the SQ
+ * descriptor ring and the SQ packet headers reside:
+ * 0x1 - descriptors and headers are in OS memory,
+ * 0x3 - descriptors and headers in device memory
+ * (a.k.a Low Latency Queue)
+ * 6:4 : completion_policy - Describes what policy
+ * to use for generating a completion entry (cqe) in
+ * the CQ associated with this SQ: 0x0 - cqe for each
+ * sq descriptor, 0x1 - cqe upon request in sq
+ * descriptor, 0x2 - current queue head pointer is
+ * updated in OS memory upon sq descriptor request
+ * 0x3 - current queue head pointer is updated in OS
+ * memory for each sq descriptor
+ * 7 : reserved15_w1
+ */
+ uint8_t sq_caps_2;
+
+ /* 0 : is_physically_contiguous - Describes whether the
+ * queue ring memory is allocated in physically
+ * contiguous pages or split.
+ * 7:1 : reserved17_w1
+ */
+ uint8_t sq_caps_3;
+
+ /* word 2 : */
+ /* associated completion queue id. This CQ must be created prior to
+ * SQ creation
+ */
+ uint16_t cq_idx;
+
+ /* submission queue depth in entries */
+ uint16_t sq_depth;
+
+ /* words 3:4 : SQ physical base address in OS memory. This field
+ * should not be used for Low Latency queues. Has to be page
+ * aligned.
+ */
+ struct ena_common_mem_addr sq_ba;
+
+ /* words 5:6 : specifies queue head writeback location in OS
+ * memory. Valid if completion_policy is set to
+ * completion_policy_head_on_demand or completion_policy_head. Has
+ * to be cache aligned
+ */
+ struct ena_common_mem_addr sq_head_writeback;
+
+ /* word 7 : reserved word */
+ uint32_t reserved0_w7;
+
+ /* word 8 : reserved word */
+ uint32_t reserved0_w8;
+};
+
+/* submission queue direction */
+enum ena_admin_sq_direction {
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
+};
+
+/* ENA Response for Create SQ Command. Appears in ACQ entry as
+ * response_specific_data
+ */
+struct ena_admin_acq_create_sq_resp_desc {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* word 2 : */
+ /* sq identifier */
+ uint16_t sq_idx;
+
+ uint16_t reserved;
+
+ /* word 3 : queue doorbell address as an offset to PCIe MMIO REG
+ * BAR
+ */
+ uint32_t sq_doorbell_offset;
+
+ /* word 4 : low latency queue ring base address as an offset to
+ * PCIe MMIO LLQ_MEM BAR
+ */
+ uint32_t llq_descriptors_offset;
+
+ /* word 5 : low latency queue headers' memory as an offset to PCIe
+ * MMIO LLQ_MEM BAR
+ */
+ uint32_t llq_headers_offset;
+};
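+
+/* Illustrative sketch (not part of the original header): the doorbell offset
+ * returned above is relative to the PCIe REG BAR mapping, so a driver would
+ * typically derive the doorbell pointer as shown below. "reg_bar_va" is a
+ * hypothetical pointer to the mapped REG BAR, not a name from this file.
+ *
+ *	uint32_t *sq_db =
+ *		(uint32_t *)((uintptr_t)reg_bar_va + resp->sq_doorbell_offset);
+ *	// new SQ tail indices are written to *sq_db to ring the doorbell
+ */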
+
+/* ENA AQ Destroy Submission Queue command. Placed in control buffer
+ * pointed by AQ entry
+ */
+struct ena_admin_aq_destroy_sq_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1 : */
+ struct ena_admin_sq sq;
+};
+
+/* ENA Response for Destroy SQ Command. Appears in ACQ entry as
+ * response_specific_data
+ */
+struct ena_admin_acq_destroy_sq_resp_desc {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Create Completion Queue command */
+struct ena_admin_aq_create_cq_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* word 1 : */
+ /* 4:0 : reserved5
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode, otherwise - polling
+ * 7:6 : reserved6
+ */
+ uint8_t cq_caps_1;
+
+ /* 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 7:5 : reserved7
+ */
+ uint8_t cq_caps_2;
+
+ /* completion queue depth in # of entries. must be a power of 2 */
+ uint16_t cq_depth;
+
+ /* word 2 : msix vector assigned to this cq */
+ uint32_t msix_vector;
+
+ /* words 3:4 : cq physical base address in OS memory. CQ must be
+ * physically contiguous
+ */
+ struct ena_common_mem_addr cq_ba;
+};
+
+/* ENA Response for Create CQ Command. Appears in ACQ entry as response
+ * specific data
+ */
+struct ena_admin_acq_create_cq_resp_desc {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* word 2 : */
+ /* cq identifier */
+ uint16_t cq_idx;
+
+ /* actual cq depth in # of entries */
+ uint16_t cq_actual_depth;
+
+ /* word 3 : doorbell address as an offset to PCIe MMIO REG BAR */
+ uint32_t cq_doorbell_offset;
+
+ /* word 4 : completion head doorbell address as an offset to PCIe
+ * MMIO REG BAR
+ */
+ uint32_t cq_head_db_offset;
+
+ /* word 5 : interrupt unmask register address as an offset into
+ * PCIe MMIO REG BAR
+ */
+ uint32_t cq_interrupt_unmask_register;
+};
+
+/* ENA AQ Destroy Completion Queue command. Placed in control buffer
+ * pointed by AQ entry
+ */
+struct ena_admin_aq_destroy_cq_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* word 1 : */
+ /* associated queue id. */
+ uint16_t cq_idx;
+
+ uint16_t reserved1;
+};
+
+/* ENA Response for Destroy CQ Command. Appears in ACQ entry as
+ * response_specific_data
+ */
+struct ena_admin_acq_destroy_cq_resp_desc {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct ena_admin_aq_get_stats_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1:3 : */
+ union {
+ /* command specific inline data */
+ uint32_t inline_data_w1[3];
+
+ /* words 1:3 : points to control buffer (direct or
+ * indirect, chained if needed)
+ */
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* word 4 : */
+ /* stats type as defined in enum ena_admin_get_stats_type */
+ uint8_t type;
+
+ /* stats scope defined in enum ena_admin_get_stats_scope */
+ uint8_t scope;
+
+ uint16_t reserved3;
+
+ /* word 5 : */
+ /* queue id. used when scope is specific_queue */
+ uint16_t queue_idx;
+
+ /* device id; the value 0xFFFF means this device. Only a privileged
+ * device can get stats of another device
+ */
+ uint16_t device_id;
+};
+
+/* Basic Statistics data. */
+struct ena_admin_basic_stats {
+ /* word 0 : */
+ uint32_t tx_bytes_low;
+
+ /* word 1 : */
+ uint32_t tx_bytes_high;
+
+ /* word 2 : */
+ uint32_t tx_pkts_low;
+
+ /* word 3 : */
+ uint32_t tx_pkts_high;
+
+ /* word 4 : */
+ uint32_t rx_bytes_low;
+
+ /* word 5 : */
+ uint32_t rx_bytes_high;
+
+ /* word 6 : */
+ uint32_t rx_pkts_low;
+
+ /* word 7 : */
+ uint32_t rx_pkts_high;
+
+ /* word 8 : */
+ uint32_t rx_drops_low;
+
+ /* word 9 : */
+ uint32_t rx_drops_high;
+};
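+
+/* Illustrative sketch (not part of the original header): each counter above
+ * is split into low/high 32-bit words, so a 64-bit value is reassembled as
+ * shown for tx_bytes (the same pattern applies to the other fields).
+ *
+ *	uint64_t tx_bytes = ((uint64_t)stats->tx_bytes_high << 32) |
+ *			    stats->tx_bytes_low;
+ */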
+
+/* ENA Response for Get Statistics Command. Appears in ACQ entry as
+ * response_specific_data
+ */
+struct ena_admin_acq_get_stats_resp {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* words 2:11 : */
+ struct ena_admin_basic_stats basic_stats;
+};
+
+/* ENA Get/Set Feature common descriptor. Appears as inline word in
+ * ena_aq_entry
+ */
+struct ena_admin_get_set_feature_common_desc {
+ /* word 0 : */
+ /* 1:0 : select - 0x1 - current value; 0x3 - default
+ * value
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+
+ /* as appears in ena_feature_id */
+ uint8_t feature_id;
+
+ /* reserved16 */
+ uint16_t reserved16;
+};
+
+/* ENA Device Attributes Feature descriptor. */
+struct ena_admin_device_attr_feature_desc {
+ /* word 0 : implementation id */
+ uint32_t impl_id;
+
+ /* word 1 : device version */
+ uint32_t device_version;
+
+ /* word 2 : bitmap of supported features; a value of 1
+ * indicates that the feature is supported and SET/GET can be
+ * performed on it
+ */
+ uint32_t supported_features;
+
+ /* word 3 : */
+ uint32_t reserved3;
+
+ /* word 4 : Indicates how many bits are used for physical address
+ * access.
+ */
+ uint32_t phys_addr_width;
+
+ /* word 5 : Indicates how many bits are used for virtual address access. */
+ uint32_t virt_addr_width;
+
+ /* unicast MAC address (in Network byte order) */
+ uint8_t mac_addr[6];
+
+ uint8_t reserved7[2];
+
+ /* word 8 : Max supported MTU value */
+ uint32_t max_mtu;
+};
+
+/* ENA Max Queues Feature descriptor. */
+struct ena_admin_queue_feature_desc {
+ /* word 0 : Max number of submission queues (including LLQs) */
+ uint32_t max_sq_num;
+
+ /* word 1 : Max submission queue depth */
+ uint32_t max_sq_depth;
+
+ /* word 2 : Max number of completion queues */
+ uint32_t max_cq_num;
+
+ /* word 3 : Max completion queue depth */
+ uint32_t max_cq_depth;
+
+ /* word 4 : Max number of LLQ submission queues */
+ uint32_t max_llq_num;
+
+ /* word 5 : Max submission queue depth of LLQ */
+ uint32_t max_llq_depth;
+
+ /* word 6 : Max header size */
+ uint32_t max_header_size;
+
+ /* word 7 : */
+ /* Maximum Descriptors number, including meta descriptors, allowed
+ * for a single Tx packet
+ */
+ uint16_t max_packet_tx_descs;
+
+ /* Maximum Descriptors number allowed for a single Rx packet */
+ uint16_t max_packet_rx_descs;
+};
+
+/* ENA MTU Set Feature descriptor. */
+struct ena_admin_set_feature_mtu_desc {
+ /* word 0 : mtu size including L2 */
+ uint32_t mtu;
+};
+
+/* ENA host attributes Set Feature descriptor. */
+struct ena_admin_set_feature_host_attr_desc {
+ /* words 0:1 : host OS info base address in OS memory. host info is
+ * 4KB of physically contiguous memory
+ */
+ struct ena_common_mem_addr os_info_ba;
+
+ /* words 2:3 : host debug area base address in OS memory. debug
+ * area must be physically contiguous
+ */
+ struct ena_common_mem_addr debug_ba;
+
+ /* word 4 : debug area size */
+ uint32_t debug_area_size;
+};
+
+/* ENA Interrupt Moderation Get Feature descriptor. */
+struct ena_admin_feature_intr_moder_desc {
+ /* word 0 : */
+ /* interrupt delay granularity in usec */
+ uint16_t intr_delay_resolution;
+
+ uint16_t reserved;
+};
+
+/* ENA Link Get Feature descriptor. */
+struct ena_admin_get_feature_link_desc {
+ /* word 0 : Link speed in Mbps */
+ uint32_t speed;
+
+ /* word 1 : supported speeds (bit field of enum
+ * ena_admin_link_types)
+ */
+ uint32_t supported;
+
+ /* word 2 : */
+ /* 0 : autoneg - auto negotiation
+ * 1 : duplex - Full Duplex
+ * 31:2 : reserved2
+ */
+ uint32_t flags;
+};
+
+/* ENA AENQ Feature descriptor. */
+struct ena_admin_feature_aenq_desc {
+ /* word 0 : bitmask for AENQ groups the device can report */
+ uint32_t supported_groups;
+
+ /* word 1 : bitmask for AENQ groups to report */
+ uint32_t enabled_groups;
+};
+
+/* ENA Stateless Offload Feature descriptor. */
+struct ena_admin_feature_offload_desc {
+ /* word 0 : */
+ /* Transmit side stateless offload
+ * 0 : TX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : TX_L4_ipv4_csum_part - TCP/UDP over IPv4
+ * checksum, the checksum field should be initialized
+ * with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full - TCP/UDP over IPv4
+ * checksum
+ * 3 : TX_L4_ipv6_csum_part - TCP/UDP over IPv6
+ * checksum, the checksum field should be initialized
+ * with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full - TCP/UDP over IPv6
+ * checksum
+ * 5 : tso_ipv4 - TCP/IPv4 Segmentation Offloading
+ * 6 : tso_ipv6 - TCP/IPv6 Segmentation Offloading
+ * 7 : tso_ecn - TCP Segmentation with ECN
+ */
+ uint32_t tx;
+
+ /* word 1 : */
+ /* Receive side supported stateless offload
+ * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+ * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+ * 3 : RX_hash - Hash calculation
+ */
+ uint32_t rx_supported;
+
+ /* word 2 : */
+ /* Receive side enabled stateless offload */
+ uint32_t rx_enabled;
+};
+
+/* hash functions */
+enum ena_admin_hash_functions {
+ /* Toeplitz hash */
+ ENA_ADMIN_TOEPLITZ = 1,
+
+ /* CRC32 hash */
+ ENA_ADMIN_CRC32 = 2,
+};
+
+/* ENA RSS flow hash control buffer structure */
+struct ena_admin_feature_rss_flow_hash_control {
+ /* word 0 : number of valid keys */
+ uint32_t keys_num;
+
+ /* word 1 : */
+ uint32_t reserved;
+
+ /* Toeplitz keys */
+ uint32_t key[10];
+};
+
+/* ENA RSS Flow Hash Function */
+struct ena_admin_feature_rss_flow_hash_function {
+ /* word 0 : */
+ /* supported hash functions
+ * 7:0 : funcs - supported hash functions (bitmask
+ * according to ena_admin_hash_functions)
+ */
+ uint32_t supported_func;
+
+ /* word 1 : */
+ /* selected hash func
+ * 7:0 : selected_func - selected hash function
+ * (bitmask according to ena_admin_hash_functions)
+ */
+ uint32_t selected_func;
+
+ /* word 2 : initial value */
+ uint32_t init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+ /* tcp/ipv4 */
+ ENA_ADMIN_RSS_TCP4 = 0,
+
+ /* udp/ipv4 */
+ ENA_ADMIN_RSS_UDP4 = 1,
+
+ /* tcp/ipv6 */
+ ENA_ADMIN_RSS_TCP6 = 2,
+
+ /* udp/ipv6 */
+ ENA_ADMIN_RSS_UDP6 = 3,
+
+ /* ipv4 not tcp/udp */
+ ENA_ADMIN_RSS_IP4 = 4,
+
+ /* ipv6 not tcp/udp */
+ ENA_ADMIN_RSS_IP6 = 5,
+
+ /* fragmented ipv4 */
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+
+ /* not ipv4/6 */
+ ENA_ADMIN_RSS_NOT_IP = 7,
+
+ /* max number of protocols */
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+ /* Ethernet Dest Addr */
+ ENA_ADMIN_RSS_L2_DA = 0,
+
+ /* Ethernet Src Addr */
+ ENA_ADMIN_RSS_L2_SA = 1,
+
+ /* ipv4/6 Dest Addr */
+ ENA_ADMIN_RSS_L3_DA = 2,
+
+ /* ipv4/6 Src Addr */
+ ENA_ADMIN_RSS_L3_SA = 5,
+
+ /* tcp/udp Dest Port */
+ ENA_ADMIN_RSS_L4_DP = 6,
+
+ /* tcp/udp Src Port */
+ ENA_ADMIN_RSS_L4_SP = 7,
+};
+
+/* hash input fields for flow protocol */
+struct ena_admin_proto_input {
+ /* word 0 : */
+ /* flow hash fields (bitmask according to ena_admin_flow_hash_fields) */
+ uint16_t fields;
+
+ /* 0 : inner - for tunneled packet, select the fields
+ * from inner header
+ */
+ uint16_t flags;
+};
+
+/* ENA RSS hash control buffer structure */
+struct ena_admin_feature_rss_hash_control {
+ /* supported input fields */
+ struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ /* selected input fields */
+ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ /* supported input fields for inner header */
+ struct ena_admin_proto_input supported_inner_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ /* selected input fields */
+ struct ena_admin_proto_input selected_inner_fields[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+/* ENA RSS flow hash input */
+struct ena_admin_feature_rss_flow_hash_input {
+ /* word 0 : */
+ /* supported hash input sorting
+ * 1 : L3_sort - support swapping L3 addresses if DA
+ * is smaller than SA
+ * 2 : L4_sort - support swapping L4 ports if DP is
+ * smaller than SP
+ */
+ uint16_t supported_input_sort;
+
+ /* enabled hash input sorting
+ * 1 : enable_L3_sort - enable swapping L3 addresses
+ * if DA is smaller than SA
+ * 2 : enable_L4_sort - enable swapping L4 ports if DP
+ * is smaller than SP
+ */
+ uint16_t enabled_input_sort;
+};
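+
+/* Example (illustrative, not part of the original header): with L3 sorting
+ * enabled, the two addresses are swapped before hashing whenever the
+ * destination is numerically smaller than the source, so both directions of
+ * a flow (e.g. 10.0.0.1 <-> 10.0.0.2) hash to the same value and land on the
+ * same queue. L4 sorting applies the same idea to the TCP/UDP ports.
+ */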
+
+/* Operating system type */
+enum ena_admin_os_type {
+ /* Linux OS */
+ ENA_ADMIN_OS_LINUX = 1,
+
+ /* Windows OS */
+ ENA_ADMIN_OS_WIN = 2,
+
+ /* DPDK OS */
+ ENA_ADMIN_OS_DPDK = 3,
+
+ /* FreeBSD OS */
+ ENA_ADMIN_OS_FREE_BSD = 4,
+
+ /* PXE OS */
+ ENA_ADMIN_OS_PXE = 5,
+};
+
+/* host info */
+struct ena_admin_host_info {
+ /* word 0 : OS type defined in enum ena_os_type */
+ uint32_t os_type;
+
+ /* os distribution string format */
+ uint8_t os_dist_str[128];
+
+ /* word 33 : OS distribution numeric format */
+ uint32_t os_dist;
+
+ /* kernel version string format */
+ uint8_t kernel_ver_str[32];
+
+ /* word 42 : Kernel version numeric format */
+ uint32_t kernel_ver;
+
+ /* word 43 : */
+ /* driver version
+ * 7:0 : major - major
+ * 15:8 : minor - minor
+ * 23:16 : sub_minor - sub minor
+ */
+ uint32_t driver_version;
+
+ /* features bitmap */
+ uint32_t supported_network_features[4];
+};
+
+/* ENA RSS indirection table entry */
+struct ena_admin_rss_ind_table_entry {
+ /* word 0 : */
+ /* cq identifier */
+ uint16_t cq_idx;
+
+ uint16_t reserved;
+};
+
+/* ENA RSS indirection table */
+struct ena_admin_feature_rss_ind_table {
+ /* word 0 : */
+ /* min supported table size (2^min_size) */
+ uint16_t min_size;
+
+ /* max supported table size (2^max_size) */
+ uint16_t max_size;
+
+ /* word 1 : */
+ /* table size (2^size) */
+ uint16_t size;
+
+ uint16_t reserved;
+
+ /* word 2 : index of the inline entry. 0xFFFFFFFF means invalid */
+ uint32_t inline_index;
+
+ /* word 3 : used for updating a single entry, ignored when setting
+ * the entire table through the control buffer.
+ */
+ struct ena_admin_rss_ind_table_entry inline_entry;
+};
+
+/* ENA Get Feature command */
+struct ena_admin_get_feat_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1:3 : points to control buffer (direct or indirect,
+ * chained if needed)
+ */
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ /* words 4 : */
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ /* words 5:15 : */
+ union {
+ /* raw words */
+ uint32_t raw[11];
+ } u;
+};
+
+/* ENA Get Feature command response */
+struct ena_admin_get_feat_resp {
+ /* words 0:1 : */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* words 2:15 : */
+ union {
+ /* raw words */
+ uint32_t raw[14];
+
+ /* words 2:10 : Get Device Attributes */
+ struct ena_admin_device_attr_feature_desc dev_attr;
+
+ /* words 2:5 : Max queues num */
+ struct ena_admin_queue_feature_desc max_queue;
+
+ /* words 2:3 : AENQ configuration */
+ struct ena_admin_feature_aenq_desc aenq;
+
+ /* words 2:4 : Get Link configuration */
+ struct ena_admin_get_feature_link_desc link;
+
+ /* words 2:4 : offload configuration */
+ struct ena_admin_feature_offload_desc offload;
+
+ /* words 2:4 : rss flow hash function */
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ /* words 2 : rss flow hash input */
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ /* words 2:3 : rss indirection table */
+ struct ena_admin_feature_rss_ind_table ind_table;
+
+ /* words 2 : interrupt moderation configuration */
+ struct ena_admin_feature_intr_moder_desc intr_moderation;
+ } u;
+};
+
+/* ENA Set Feature command */
+struct ena_admin_set_feat_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1:3 : points to control buffer (direct or indirect,
+ * chained if needed)
+ */
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ /* words 4 : */
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ /* words 5:15 : */
+ union {
+ /* raw words */
+ uint32_t raw[11];
+
+ /* words 5 : mtu size */
+ struct ena_admin_set_feature_mtu_desc mtu;
+
+ /* words 5:7 : host attributes */
+ struct ena_admin_set_feature_host_attr_desc host_attr;
+
+ /* words 5:6 : AENQ configuration */
+ struct ena_admin_feature_aenq_desc aenq;
+
+ /* words 5:7 : rss flow hash function */
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ /* words 5 : rss flow hash input */
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ /* words 5:6 : rss indirection table */
+ struct ena_admin_feature_rss_ind_table ind_table;
+ } u;
+};
+
+/* ENA Set Feature command response */
+struct ena_admin_set_feat_resp {
+ /* words 0:1 : */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* words 2:15 : */
+ union {
+ /* raw words */
+ uint32_t raw[14];
+ } u;
+};
+
+/* ENA Asynchronous Event Notification Queue descriptor. */
+struct ena_admin_aenq_common_desc {
+ /* word 0 : */
+ uint16_t group;
+
+ uint16_t syndrom;
+
+ /* word 1 : */
+ /* 0 : phase */
+ uint8_t flags;
+
+ uint8_t reserved1[3];
+
+ /* word 2 : Timestamp LSB */
+ uint32_t timestamp_low;
+
+ /* word 3 : Timestamp MSB */
+ uint32_t timestamp_high;
+};
+
+/* asynchronous event notification groups */
+enum ena_admin_aenq_group {
+ /* Link State Change */
+ ENA_ADMIN_LINK_CHANGE = 0,
+
+ ENA_ADMIN_FATAL_ERROR = 1,
+
+ ENA_ADMIN_WARNING = 2,
+
+ ENA_ADMIN_NOTIFICATION = 3,
+
+ ENA_ADMIN_KEEP_ALIVE = 4,
+
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+};
+
+/* syndrome of AENQ notification group */
+enum ena_admin_aenq_notification_syndrom {
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
+};
+
+/* ENA Asynchronous Event Notification generic descriptor. */
+struct ena_admin_aenq_entry {
+ /* words 0:3 : */
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* command specific inline data */
+ uint32_t inline_data_w4[12];
+};
+
+/* ENA Asynchronous Event Notification Queue Link Change descriptor. */
+struct ena_admin_aenq_link_change_desc {
+ /* words 0:3 : */
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* word 4 : */
+ /* 0 : link_status */
+ uint32_t flags;
+};
+
+/* ENA MMIO Readless response interface */
+struct ena_admin_ena_mmio_req_read_less_resp {
+ /* word 0 : */
+ /* request id */
+ uint16_t req_id;
+
+ /* register offset */
+ uint16_t reg_off;
+
+ /* word 1 : value is valid when poll is cleared */
+ uint32_t reg_val;
+};
+
+/* aq_common_desc */
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* sq */
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+
+/* acq_common_desc */
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aq_create_sq_cmd */
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
+
+/* aq_create_cq_cmd */
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+/* get_feature_link_desc */
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+
+/* feature_offload_desc */
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+
+/* feature_rss_flow_hash_function */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \
+ GENMASK(7, 0)
+
+/* proto_input */
+#define ENA_ADMIN_PROTO_INPUT_INNER_MASK BIT(0)
+
+/* feature_rss_flow_hash_input */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
+
+/* host_info */
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+
+/* aenq_common_desc */
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_link_change_desc */
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint16_t
+get_ena_admin_aq_common_desc_command_id(
+ const struct ena_admin_aq_common_desc *p)
+{
+ return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void
+set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p,
+ uint16_t val)
+{
+ p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void
+set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_common_desc_ctrl_data(
+ const struct ena_admin_aq_common_desc *p)
+{
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >>
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT)
+ & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_common_desc_ctrl_data_indirect(
+ const struct ena_admin_aq_common_desc *p)
+{
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK)
+ >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_common_desc_ctrl_data_indirect(
+ struct ena_admin_aq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT)
+ & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+}
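+
+/* Usage sketch (illustrative, not part of the original header): marking an
+ * admin command as carrying an indirect control buffer by combining the
+ * accessors above. "desc" is a hypothetical descriptor being prepared.
+ *
+ *	struct ena_admin_aq_common_desc desc = {0};
+ *
+ *	set_ena_admin_aq_common_desc_ctrl_data(&desc, 1);
+ *	set_ena_admin_aq_common_desc_ctrl_data_indirect(&desc, 1);
+ *	// desc.flags now has bits 1 and 2 set; bit 0 (phase) is managed
+ *	// separately by the admin queue producer.
+ */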
+
+static inline uint8_t
+get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+{
+ return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK)
+ >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+}
+
+static inline void
+set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+{
+ p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_acq_common_desc_command_id(
+ const struct ena_admin_acq_common_desc *p)
+{
+ return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void
+set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p,
+ uint16_t val)
+{
+ p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void
+set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_sq_cmd_sq_direction(
+ const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK)
+ >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_create_sq_cmd_sq_direction(
+ struct ena_admin_aq_create_sq_cmd *p,
+ uint8_t val)
+{
+ p->sq_identity |= (val <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT)
+ & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_sq_cmd_placement_policy(
+ const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline void
+set_ena_admin_aq_create_sq_cmd_placement_policy(
+ struct ena_admin_aq_create_sq_cmd *p,
+ uint8_t val)
+{
+ p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_sq_cmd_completion_policy(
+ const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return (p->sq_caps_2
+ & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK)
+ >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_create_sq_cmd_completion_policy(
+ struct ena_admin_aq_create_sq_cmd *p,
+ uint8_t val)
+{
+ p->sq_caps_2 |=
+ (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT)
+ & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
+ const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return p->sq_caps_3 &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline void
+set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
+ struct ena_admin_aq_create_sq_cmd *p,
+ uint8_t val)
+{
+ p->sq_caps_3 |= val &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
+ const struct ena_admin_aq_create_cq_cmd *p)
+{
+ return (p->cq_caps_1 &
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK)
+ >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
+ struct ena_admin_aq_create_cq_cmd *p,
+ uint8_t val)
+{
+ p->cq_caps_1 |=
+ (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT)
+ & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
+ const struct ena_admin_aq_create_cq_cmd *p)
+{
+ return p->cq_caps_2
+ & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline void
+set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
+ struct ena_admin_aq_create_cq_cmd *p,
+ uint8_t val)
+{
+ p->cq_caps_2 |=
+ val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_get_set_feature_common_desc_select(
+ const struct ena_admin_get_set_feature_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline void
+set_ena_admin_get_set_feature_common_desc_select(
+ struct ena_admin_get_set_feature_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_get_feature_link_desc_autoneg(
+ const struct ena_admin_get_feature_link_desc *p)
+{
+ return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline void
+set_ena_admin_get_feature_link_desc_autoneg(
+ struct ena_admin_get_feature_link_desc *p,
+ uint32_t val)
+{
+ p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_get_feature_link_desc_duplex(
+ const struct ena_admin_get_feature_link_desc *p)
+{
+ return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK)
+ >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+}
+
+static inline void
+set_ena_admin_get_feature_link_desc_duplex(
+ struct ena_admin_get_feature_link_desc *p,
+ uint32_t val)
+{
+ p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT)
+ & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val <<
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val <<
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val <<
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val <<
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_tso_ipv4(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_tso_ipv4(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_tso_ipv6(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_tso_ipv6(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_tso_ecn(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_tso_ecn(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return p->rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->rx_supported |=
+ val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->rx_supported |=
+ (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->rx_supported |=
+ (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_RX_hash(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_RX_hash(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->rx_supported |=
+ (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_rss_flow_hash_function_funcs(
+ const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+ return p->supported_func &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_function_funcs(
+ struct ena_admin_feature_rss_flow_hash_function *p,
+ uint32_t val)
+{
+ p->supported_func |=
+ val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_rss_flow_hash_function_selected_func(
+ const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+ return p->selected_func &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_function_selected_func(
+ struct ena_admin_feature_rss_flow_hash_function *p,
+ uint32_t val)
+{
+ p->selected_func |=
+ val &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_proto_input_inner(const struct ena_admin_proto_input *p)
+{
+ return p->flags & ENA_ADMIN_PROTO_INPUT_INNER_MASK;
+}
+
+static inline void
+set_ena_admin_proto_input_inner(struct ena_admin_proto_input *p, uint16_t val)
+{
+ p->flags |= val & ENA_ADMIN_PROTO_INPUT_INNER_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_feature_rss_flow_hash_input_L3_sort(
+ const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->supported_input_sort &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK)
+ >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_input_L3_sort(
+ struct ena_admin_feature_rss_flow_hash_input *p,
+ uint16_t val)
+{
+ p->supported_input_sort |=
+ (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT)
+ & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_feature_rss_flow_hash_input_L4_sort(
+ const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->supported_input_sort &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK)
+ >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_input_L4_sort(
+ struct ena_admin_feature_rss_flow_hash_input *p,
+ uint16_t val)
+{
+ p->supported_input_sort |=
+ (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT)
+ & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
+ const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->enabled_input_sort &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK)
+ >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
+ struct ena_admin_feature_rss_flow_hash_input *p,
+ uint16_t val)
+{
+ p->enabled_input_sort |=
+ (val <<
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT)
+ & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
+ const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->enabled_input_sort &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK)
+ >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
+ struct ena_admin_feature_rss_flow_hash_input *p,
+ uint16_t val)
+{
+ p->enabled_input_sort |=
+ (val <<
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT)
+ & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+{
+ return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline void
+set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+{
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK)
+ >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+}
+
+static inline void
+set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT)
+ & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+{
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK)
+ >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+}
+
+static inline void
+set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT)
+ & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+}
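+
+/* Usage sketch (illustrative, not part of the original header): packing a
+ * driver version of 1.2.3 into the host info descriptor with the helpers
+ * above. The version numbers are arbitrary placeholders.
+ *
+ *	struct ena_admin_host_info *host_info = ...; // allocated by the driver
+ *
+ *	set_ena_admin_host_info_major(host_info, 1);
+ *	set_ena_admin_host_info_minor(host_info, 2);
+ *	set_ena_admin_host_info_sub_minor(host_info, 3);
+ *	// host_info->driver_version == 0x00030201
+ */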
+
+static inline uint8_t
+get_ena_admin_aenq_common_desc_phase(
+ const struct ena_admin_aenq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void
+set_ena_admin_aenq_common_desc_phase(
+ struct ena_admin_aenq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_aenq_link_change_desc_link_status(
+ const struct ena_admin_aenq_link_change_desc *p)
+{
+ return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+static inline void
+set_ena_admin_aenq_link_change_desc_link_status(
+ struct ena_admin_aenq_link_change_desc *p,
+ uint32_t val)
+{
+ p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_ADMIN_H_ */
diff --git a/drivers/net/ena/base/ena_defs/ena_common_defs.h b/drivers/net/ena/base/ena_defs/ena_common_defs.h
new file mode 100644
index 00000000..95e0f389
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_common_defs.h
@@ -0,0 +1,54 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_COMMON_H_
+#define _ENA_COMMON_H_
+
+/* spec version */
+#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* spec version major */
+#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* spec version minor */
+
+/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
+struct ena_common_mem_addr {
+ /* word 0 : low 32 bit of the memory address */
+ uint32_t mem_addr_low;
+
+ /* word 1 : */
+ /* high 16 bits of the memory address */
+ uint16_t mem_addr_high;
+
+ /* MBZ */
+ uint16_t reserved16;
+};
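+
+/* Illustrative sketch (not part of the upstream definitions; guarded out
+ * of the build on purpose): it shows how a driver would typically split
+ * a 48-bit DMA address into the low/high parts of struct
+ * ena_common_mem_addr. The function name and return convention are
+ * assumptions for this example only.
+ */
+#if 0
+static inline int ena_mem_addr_set_sketch(struct ena_common_mem_addr *ena_addr,
+					  uint64_t dma_addr)
+{
+	/* ENA addresses are at most 48 bits wide */
+	if (dma_addr >> 48)
+		return -1;
+
+	ena_addr->mem_addr_low = (uint32_t)dma_addr;           /* bits [31:0] */
+	ena_addr->mem_addr_high = (uint16_t)(dma_addr >> 32);  /* bits [47:32] */
+	ena_addr->reserved16 = 0;                               /* MBZ */
+
+	return 0;
+}
+#endif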
+
+#endif /*_ENA_COMMON_H_ */
diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
new file mode 100644
index 00000000..a547033d
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
@@ -0,0 +1,1488 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+/* Layer 3 protocol index */
+enum ena_eth_io_l3_proto_index {
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
+};
+
+/* Layer 4 protocol index */
+enum ena_eth_io_l4_proto_index {
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+};
+
+/* ENA IO Queue Tx descriptor */
+struct ena_eth_io_tx_desc {
+ /* word 0 : */
+ /* length, request id and control flags
+ * 15:0 : length - Buffer length in bytes; must
+ * include any packet trailers that the ENA is
+ * supposed to update, such as the End-to-End CRC and
+ * Authentication GMAC. This length must not include
+ * the 'Push_Buffer' length, nor the 4 bytes appended
+ * at the end for the 802.3 Ethernet FCS
+ * 21:16 : req_id_hi - Request ID[15:10]
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBZ
+ * 24 : phase
+ * 25 : reserved1 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ uint32_t len_ctrl;
+
+ /* word 1 : */
+ /* ethernet control
+ * 3:0 : l3_proto_idx - L3 protocol; if
+ * tunnel_ctrl[0] is set, this is the inner packet's
+ * L3 protocol. This field is required when
+ * l3_csum_en, l3_csum or tso_en is set.
+ * 4 : DF - IPv4 DF; must be 0 if the packet is IPv4
+ * and the DF flag of the IPv4 header is 0, otherwise
+ * it must be set to 1
+ * 6:5 : reserved5
+ * 7 : tso_en - Enable TSO, for TCP only. For
+ * tunneled packets (tunnel_ctrl[0]=1), the inner
+ * packet is segmented while the outer tunnel header
+ * is duplicated
+ * 12:8 : l4_proto_idx - L4 protocol; if
+ * tunnel_ctrl[0] is set, this is the inner packet's
+ * L4 protocol. This field needs to be set when
+ * l4_csum_en or tso_en is set.
+ * 13 : l3_csum_en - enable IPv4 header checksum. If
+ * tunnel_ctrl[0] is set, this enables the checksum
+ * of the inner packet's IPv4 header
+ * 14 : l4_csum_en - enable TCP/UDP checksum. If
+ * tunnel_ctrl[0] is set, this enables the checksum
+ * of the inner packet's TCP/UDP header
+ * 15 : ethernet_fcs_dis - when set, the controller
+ * will not append the 802.3 Ethernet Frame Check
+ * Sequence to the packet
+ * 16 : reserved16
+ * 17 : l4_csum_partial - L4 partial checksum. When
+ * set to 0, the ENA calculates the L4 checksum, and
+ * the Destination Address required for the TCP/UDP
+ * pseudo-header is taken from the actual packet's L3
+ * header. When set to 1, the ENA doesn't calculate
+ * the pseudo-header sum; the checksum field of the
+ * L4 header is used instead. When TSO is enabled,
+ * the checksum of the pseudo-header must not include
+ * the TCP length field. L4 partial checksum should
+ * be used for IPv6 packets that contain Routing
+ * Headers.
+ * 20:18 : tunnel_ctrl - Bit 0: tunneling exists, Bit
+ * 1: tunnel packet actually uses UDP as L4, Bit 2:
+ * tunnel packet L3 protocol: 0: IPv4 1: IPv6
+ * 21 : ts_req - Indicates that the packet is IEEE
+ * 1588v2 packet requiring the timestamp
+ * 31:22 : req_id_lo - Request ID[9:0]
+ */
+ uint32_t meta_ctrl;
+
+ /* word 2 : Buffer address bits[31:0] */
+ uint32_t buff_addr_lo;
+
+ /* word 3 : */
+ /* address high and header size
+ * 15:0 : addr_hi - Buffer Pointer[47:32]
+ * 23:16 : reserved16_w2
+ * 31:24 : header_length - Header length. For Low
+ * Latency Queues, this field indicates the number
+ * of bytes written to the headers' memory. For
+ * normal queues, if the packet is TCP or UDP and
+ * longer than max_header_size, this field should be
+ * set to the sum of the L4 header offset and the L4
+ * header size (without options); otherwise it
+ * should be set to 0. For both modes, this field
+ * must not exceed max_header_size, whose value is
+ * reported by the Max Queues Feature descriptor
+ */
+ uint32_t buff_addr_hi_hdr_sz;
+};
+
+/* ENA IO Queue Tx Meta descriptor */
+struct ena_eth_io_tx_meta_desc {
+ /* word 0 : */
+ /* length, request id and control flags
+ * 9:0 : req_id_lo - Request ID[9:0]
+ * 11:10 : outr_l3_off_hi - valid if
+ * tunnel_ctrl[0]=1. bits[4:3] of outer packet L3
+ * offset
+ * 12 : reserved12 - MBZ
+ * 13 : reserved13 - MBZ
+ * 14 : ext_valid - if set, the offset fields in
+ * Word 2 are valid, as are MSS High in Word 0, Outer
+ * L3 Offset High in Word 0 and bits [31:24] in Word 3
+ * 15 : word3_valid - if set, Crypto Info[23:0] of
+ * Word 3 is valid
+ * 19:16 : mss_hi_ptp
+ * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
+ * Extended Metadata Descriptor
+ * 21 : meta_store - Store extended metadata in queue
+ * cache
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBO
+ * 24 : phase
+ * 25 : reserved25 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ uint32_t len_ctrl;
+
+ /* word 1 : */
+ /* word 1
+ * 5:0 : req_id_hi
+ * 31:6 : reserved6 - MBZ
+ */
+ uint32_t word1;
+
+ /* word 2 : */
+ /* word 2
+ * 7:0 : l3_hdr_len - the length of the L3 IP header.
+ * If tunnel_ctrl[0]=1, this is the IP header length
+ * of the inner packet. FIXME - check if includes IP
+ * options hdr_len
+ * 15:8 : l3_hdr_off - the offset of the first byte
+ * of the L3 header from the beginning of the to-be
+ * transmitted packet. If tunnel_ctrl[0]=1, this is
+ * the offset of the inner packet's L3 header
+ * 21:16 : l4_hdr_len_in_words - the L4 header length
+ * in words. There is an explicit assumption that the
+ * L4 header appears right after the L3 header and
+ * the L4 offset is based on l3_hdr_off+l3_hdr_len
+ * FIXME - pls confirm
+ * 31:22 : mss_lo
+ */
+ uint32_t word2;
+
+ /* word 3 : */
+ /* word 3
+ * 23:0 : crypto_info
+ * 28:24 : outr_l3_hdr_len_words - valid if
+ * tunnel_ctrl[0]=1. Counts in words
+ * 31:29 : outr_l3_off_lo - valid if
+ * tunnel_ctrl[0]=1. bits[2:0] of outer packet L3
+ * offset. Counts the offset of the tunnel IP header
+ * from the beginning of the packet. NOTE: if the
+ * tunnel header requires a CRC or checksum, it is
+ * expected to be computed by the driver as it is not
+ * done by the HW
+ */
+ uint32_t word3;
+};
+
+/* ENA IO Queue Tx completions descriptor */
+struct ena_eth_io_tx_cdesc {
+ /* word 0 : */
+ /* Request ID[15:0] */
+ uint16_t req_id;
+
+ uint8_t status;
+
+ /* flags
+ * 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ /* word 1 : */
+ uint16_t sub_qid;
+
+ /* indicates location of submission queue head */
+ uint16_t sq_head_idx;
+};
+
+/* ENA IO Queue Rx descriptor */
+struct ena_eth_io_rx_desc {
+ /* word 0 : */
+ /* In bytes. 0 means 64KB */
+ uint16_t length;
+
+ /* MBZ */
+ uint8_t reserved2;
+
+ /* control flags
+ * 0 : phase
+ * 1 : reserved1 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction
+ * 3 : last - Indicates last descriptor in transaction
+ * 4 : comp_req
+ * 5 : reserved5 - MBO
+ * 7:6 : reserved6 - MBZ
+ */
+ uint8_t ctrl;
+
+ /* word 1 : */
+ uint16_t req_id;
+
+ /* MBZ */
+ uint16_t reserved6;
+
+ /* word 2 : Buffer address bits[31:0] */
+ uint32_t buff_addr_lo;
+
+ /* word 3 : */
+ /* Buffer Address bits[47:32] */
+ uint16_t buff_addr_hi;
+
+ /* MBZ */
+ uint16_t reserved16_w3;
+};
+
+/* ENA IO Queue Rx Completion Base Descriptor (4-word format). Note: all
+ * ethernet parsing information is valid only when last=1
+ */
+struct ena_eth_io_rx_cdesc_base {
+ /* word 0 : */
+ /* 4:0 : l3_proto_idx - L3 protocol index
+ * 6:5 : src_vlan_cnt - Source VLAN count
+ * 7 : tunnel - Tunnel exists
+ * 12:8 : l4_proto_idx - L4 protocol index
+ * 13 : l3_csum_err - when set, either an L3 checksum
+ * error was detected or the controller didn't
+ * validate the checksum. If a tunnel exists, this
+ * result is for the inner packet. This bit is valid
+ * only when l3_proto_idx indicates an IPv4 packet
+ * 14 : l4_csum_err - when set, either an L4 checksum
+ * error was detected or the controller didn't
+ * validate the checksum. If a tunnel exists, this
+ * result is for the inner packet. This bit is valid
+ * only when l4_proto_idx indicates a TCP/UDP packet
+ * and ipv4_frag is not set
+ * 15 : ipv4_frag - Indicates IPv4 fragmented packet
+ * 17:16 : reserved16
+ * 19:18 : reserved18
+ * 20 : secured_pkt - Set if packet was handled by
+ * inline crypto engine
+ * 22:21 : crypto_status - bit 0 secured direction:
+ * 0: decryption, 1: encryption. bit 1 reserved
+ * 23 : reserved23
+ * 24 : phase
+ * 25 : l3_csum2 - second checksum engine result
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : inr_l4_csum - TCP/UDP checksum results for
+ * inner packet
+ * 29 : reserved29
+ * 30 : buffer - 0: Metadata descriptor. 1: Buffer
+ * Descriptor was used
+ * 31 : reserved31
+ */
+ uint32_t status;
+
+ /* word 1 : */
+ uint16_t length;
+
+ uint16_t req_id;
+
+ /* word 2 : 32-bit hash result */
+ uint32_t hash;
+
+ /* word 3 : */
+ /* submission queue number */
+ uint16_t sub_qid;
+
+ uint16_t reserved;
+};
+
+/* ENA IO Queue Rx Completion Descriptor (8-word format) */
+struct ena_eth_io_rx_cdesc_ext {
+ /* words 0:3 : Rx Completion Extended */
+ struct ena_eth_io_rx_cdesc_base base;
+
+ /* word 4 : Completed Buffer address bits[31:0] */
+ uint32_t buff_addr_lo;
+
+ /* word 5 : */
+ /* the buffer address used bits[47:32] */
+ uint16_t buff_addr_hi;
+
+ uint16_t reserved16;
+
+ /* word 6 : Reserved */
+ uint32_t reserved_w6;
+
+ /* word 7 : Reserved */
+ uint32_t reserved_w7;
+};
+
+/* ENA Interrupt Unmask Register */
+struct ena_eth_io_intr_reg {
+ /* word 0 : */
+ /* 14:0 : rx_intr_delay - rx interrupt delay value
+ * 29:15 : tx_intr_delay - tx interrupt delay value
+ * 30 : intr_unmask - if set, unmasks interrupt
+ * 31 : reserved
+ */
+ uint32_t intr_control;
+};
+
+/* tx_desc */
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT 18
+#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK GENMASK(20, 18)
+#define ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT 21
+#define ENA_ETH_IO_TX_DESC_TS_REQ_MASK BIT(21)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+
+/* tx_meta_desc */
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT 10
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK GENMASK(11, 10)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15
+#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK BIT(15)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK GENMASK(23, 0)
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK GENMASK(28, 24)
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT 29
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK GENMASK(31, 29)
+
+/* tx_cdesc */
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+
+/* rx_desc */
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+
+/* rx_cdesc_base */
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT 7
+#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK BIT(7)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT 20
+#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK BIT(20)
+#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT 21
+#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK GENMASK(22, 21)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT 28
+#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK BIT(28)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+
+/* intr_reg */
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint32_t get_ena_eth_io_tx_desc_length(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_length(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK)
+ >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_hi(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT)
+ & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK)
+ >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_meta_desc(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT)
+ & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_phase(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK)
+ >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_phase(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT)
+ & ENA_ETH_IO_TX_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_first(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK)
+ >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_first(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT)
+ & ENA_ETH_IO_TX_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_last(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK)
+ >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_last(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT)
+ & ENA_ETH_IO_TX_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_comp_req(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK)
+ >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_comp_req(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT)
+ & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_proto_idx(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_DF(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK)
+ >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_DF(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_DF_SHIFT)
+ & ENA_ETH_IO_TX_DESC_DF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_tso_en(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK)
+ >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_tso_en(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT)
+ & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK)
+ >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_proto_idx(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT)
+ & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK)
+ >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_csum_en(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT)
+ & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK)
+ >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_en(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT)
+ & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK)
+ >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT)
+ & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK)
+ >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_partial(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT)
+ & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_tunnel_ctrl(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK)
+ >> ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_tunnel_ctrl(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT)
+ & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_ts_req(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TS_REQ_MASK)
+ >> ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_ts_req(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT)
+ & ENA_ETH_IO_TX_DESC_TS_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK)
+ >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_lo(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT)
+ & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_addr_hi(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_header_length(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK)
+ >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_header_length(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->buff_addr_hi_hdr_sz |=
+ (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT)
+ & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+}
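+
+/* Illustrative sketch (not part of the upstream definitions; guarded out
+ * of the build on purpose): the setters above OR new bits into the
+ * descriptor words, so callers are expected to start from a zeroed
+ * descriptor and write each field once. The helper name and argument
+ * list are assumptions for this example; req_id is split into
+ * req_id_lo (bits [9:0]) and req_id_hi (bits [15:10]).
+ */
+#if 0
+static inline void ena_tx_desc_fill_sketch(struct ena_eth_io_tx_desc *desc,
+					   uint32_t len, uint32_t req_id,
+					   uint32_t phase)
+{
+	*desc = (struct ena_eth_io_tx_desc){ 0 };
+
+	set_ena_eth_io_tx_desc_length(desc, len);
+	set_ena_eth_io_tx_desc_req_id_lo(desc, req_id & 0x3ff);
+	set_ena_eth_io_tx_desc_req_id_hi(desc, req_id >> 10);
+	set_ena_eth_io_tx_desc_phase(desc, phase);
+	set_ena_eth_io_tx_desc_first(desc, 1);
+	set_ena_eth_io_tx_desc_last(desc, 1);
+	set_ena_eth_io_tx_desc_comp_req(desc, 1);
+}
+#endif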
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_hi(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_hi(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_ext_valid(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_word3_valid(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_word3_valid(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_store(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_desc(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_phase(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_first(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_first(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_last(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_last(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_comp_req(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word2 |=
+ (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word2 |=
+ (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_lo(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word2 |=
+ (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_crypto_info(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word3 & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_crypto_info(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word3 |= val & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word3 |=
+ (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_lo(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_lo(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word3 |=
+ (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_tx_cdesc_phase(
+ const struct ena_eth_io_tx_cdesc *p)
+{
+ return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_tx_cdesc_phase(
+ struct ena_eth_io_tx_cdesc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_phase(
+ const struct ena_eth_io_rx_desc *p)
+{
+ return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_rx_desc_phase(
+ struct ena_eth_io_rx_desc *p,
+ uint8_t val)
+{
+ p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_first(
+ const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK)
+ >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_first(
+ struct ena_eth_io_rx_desc *p,
+ uint8_t val)
+{
+ p->ctrl |=
+ (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT)
+ & ENA_ETH_IO_RX_DESC_FIRST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_last(
+ const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK)
+ >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_last(
+ struct ena_eth_io_rx_desc *p,
+ uint8_t val)
+{
+ p->ctrl |=
+ (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT)
+ & ENA_ETH_IO_RX_DESC_LAST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_comp_req(
+ const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK)
+ >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_comp_req(
+ struct ena_eth_io_rx_desc *p,
+ uint8_t val)
+{
+ p->ctrl |=
+ (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT)
+ & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_tunnel(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_tunnel(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_secured_pkt(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_secured_pkt(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_crypto_status(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_crypto_status(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_phase(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_first(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_last(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_inr_l4_csum(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_inr_l4_csum(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_buffer(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
+}
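+
+/* Illustrative sketch (not part of the upstream definitions; guarded out
+ * of the build on purpose): a simplified example of how the getters
+ * above are consumed on the Rx completion path to decide whether the
+ * checksum offload result can be trusted. A real driver would also
+ * check l3_proto_idx/l4_proto_idx; the helper name and return
+ * convention are assumptions for this example.
+ */
+#if 0
+static inline int ena_rx_cdesc_csum_ok_sketch(
+	const struct ena_eth_io_rx_cdesc_base *cdesc)
+{
+	/* parsing results are only meaningful on the last descriptor */
+	if (!get_ena_eth_io_rx_cdesc_base_last(cdesc))
+		return 0;
+
+	if (get_ena_eth_io_rx_cdesc_base_l3_csum_err(cdesc))
+		return 0;
+
+	/* l4_csum_err is not valid for IPv4 fragments */
+	if (get_ena_eth_io_rx_cdesc_base_l4_csum_err(cdesc) &&
+	    !get_ena_eth_io_rx_cdesc_base_ipv4_frag(cdesc))
+		return 0;
+
+	return 1;
+}
+#endif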
+
+static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(
+ const struct ena_eth_io_intr_reg *p)
+{
+ return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline void set_ena_eth_io_intr_reg_rx_intr_delay(
+ struct ena_eth_io_intr_reg *p,
+ uint32_t val)
+{
+ p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(
+ const struct ena_eth_io_intr_reg *p)
+{
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK)
+ >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_tx_intr_delay(
+ struct ena_eth_io_intr_reg *p,
+ uint32_t val)
+{
+ p->intr_control |=
+ (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(
+ const struct ena_eth_io_intr_reg *p)
+{
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK)
+ >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_intr_unmask(
+ struct ena_eth_io_intr_reg *p,
+ uint32_t val)
+{
+ p->intr_control |=
+ (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT)
+ & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
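+
+/* Illustrative sketch (not part of the upstream definitions; guarded out
+ * of the build on purpose): an interrupt doorbell value is built by
+ * clearing intr_control and then combining the three fields with the
+ * setters above. The helper name is an assumption for this example.
+ */
+#if 0
+static inline void ena_intr_reg_init_sketch(struct ena_eth_io_intr_reg *reg,
+					    uint32_t rx_delay,
+					    uint32_t tx_delay,
+					    uint32_t unmask)
+{
+	reg->intr_control = 0;
+	set_ena_eth_io_intr_reg_rx_intr_delay(reg, rx_delay);
+	set_ena_eth_io_intr_reg_tx_intr_delay(reg, tx_delay);
+	set_ena_eth_io_intr_reg_intr_unmask(reg, unmask);
+}
+#endif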
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_ETH_IO_H_ */
diff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h
new file mode 100644
index 00000000..4abdffed
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h
@@ -0,0 +1,35 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define ENA_GEN_DATE "Mon Feb 15 14:33:08 IST 2016"
+#define ENA_GEN_COMMIT "c71ec25"
diff --git a/drivers/net/ena/base/ena_defs/ena_includes.h b/drivers/net/ena/base/ena_defs/ena_includes.h
new file mode 100644
index 00000000..a86c876f
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_includes.h
@@ -0,0 +1,39 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "ena_common_defs.h"
+#include "ena_regs_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
+#include "ena_efa_admin_defs.h"
+#include "ena_efa_io_defs.h"
diff --git a/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
new file mode 100644
index 00000000..d0241278
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
@@ -0,0 +1,135 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_REGS_H_
+#define _ENA_REGS_H_
+
+/* ena_registers offsets */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* version register */
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
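+
+/* Illustrative sketch (not part of the upstream definitions; guarded out
+ * of the build on purpose): the version register is decoded with the
+ * plain mask/shift pairs above. 'ver' stands for a raw 32-bit value read
+ * from offset ENA_REGS_VERSION_OFF; the helper names are assumptions for
+ * this example.
+ */
+#if 0
+static inline uint32_t ena_regs_major_ver_sketch(uint32_t ver)
+{
+	return (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT;
+}
+
+static inline uint32_t ena_regs_minor_ver_sketch(uint32_t ver)
+{
+	return ver & ENA_REGS_VERSION_MINOR_VERSION_MASK;
+}
+#endif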
+
+/* controller_version register */
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+
+/* caps register */
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+
+/* aq_caps register */
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* acq_caps register */
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* aenq_caps register */
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* dev_ctl register */
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+
+/* dev_sts register */
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+
+/* mmio_reg_read register */
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+
+/* rss_ind_entry_update register */
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+
+#endif /*_ENA_REGS_H_ */
diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c
new file mode 100644
index 00000000..459e0bbb
--- /dev/null
+++ b/drivers/net/ena/base/ena_eth_com.c
@@ -0,0 +1,508 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "ena_eth_com.h"
+
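+/*
+ * Completions are detected via the descriptor PHASE bit: the expected phase
+ * (io_cq->phase) is flipped each time the CQ head wraps (see
+ * ena_com_cq_inc_head() below), and a descriptor whose PHASE field does not
+ * match the expected phase has not yet been written by the device.  For
+ * example, with q_depth = 8 the driver expects one phase value while the
+ * head runs 0..7 and the opposite value for 8..15, and so on.
+ */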
+static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 expected_phase, head_masked;
+ u16 desc_phase;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)
+ ((unsigned char *)io_cq->cdesc_addr.virt_addr
+ + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+
+ desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+
+ if (desc_phase != expected_phase)
+ return NULL;
+
+ return cdesc;
+}
+
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase = 1 - io_cq->phase;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked;
+ u32 offset;
+
+ tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+
+ offset = tail_masked * io_sq->desc_entry_size;
+
+ return (unsigned char *)io_sq->desc_addr.virt_addr + offset;
+}
+
+static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u32 offset = tail_masked * io_sq->desc_entry_size;
+
+	/* If this queue isn't an LLQ, there is nothing to copy */
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return;
+
+ memcpy_toio((unsigned char *)io_sq->desc_addr.pbuf_dev_addr + offset,
+ (unsigned char *)io_sq->desc_addr.virt_addr + offset,
+ io_sq->desc_entry_size);
+}
+
+static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase = 1 - io_sq->phase;
+}
+
+static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
+ u8 *head_src, u16 header_len)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u8 __iomem *dev_head_addr =
+ io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ ENA_ASSERT(io_sq->header_addr, "header address is NULL\n");
+
+ memcpy_toio(dev_head_addr, head_src, header_len);
+
+ return 0;
+}
+
+static inline struct ena_eth_io_rx_cdesc_base *
+ ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
+{
+ idx &= (io_cq->q_depth - 1);
+ return (struct ena_eth_io_rx_cdesc_base *)
+ ((unsigned char *)io_cq->cdesc_addr.virt_addr +
+ idx * io_cq->cdesc_entry_size_in_bytes);
+}
+
+static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx,
+ u16 *nb_hw_desc)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 count = 0, head_masked;
+ u32 last = 0;
+
+ do {
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ if (!cdesc)
+ break;
+
+ ena_com_cq_inc_head(io_cq);
+ count++;
+ last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ } while (!last);
+
+ if (last) {
+ *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
+ count += io_cq->cur_rx_pkt_cdesc_count;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+
+ io_cq->cur_rx_pkt_cdesc_count = 0;
+ io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
+
+ ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ io_cq->qid, *first_cdesc_idx, count);
+ } else {
+ io_cq->cur_rx_pkt_cdesc_count += count;
+ count = 0;
+ }
+
+ *nb_hw_desc = count;
+ return 0;
+}
+
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ int rc;
+
+ if (ena_tx_ctx->meta_valid) {
+ rc = memcmp(&io_sq->cached_tx_meta,
+ &ena_tx_ctx->ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ if (unlikely(rc != 0))
+ return true;
+ }
+
+ return false;
+}
+
+static inline void ena_com_create_and_store_tx_meta_desc(
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+ meta_desc = get_sq_desc(io_sq);
+ memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+
+ /* bits 0-9 of the mss */
+ meta_desc->word2 |= (ena_meta->mss <<
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+ /* bits 10-13 of the mss */
+ meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+
+ /* Extended meta desc */
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ meta_desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ meta_desc->word2 |= ena_meta->l3_hdr_len &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+ meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+
+ meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
+	/* Cache the meta desc */
+ memcpy(&io_sq->cached_tx_meta, ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+}
+
+static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+ struct ena_eth_io_rx_cdesc_base *cdesc)
+{
+ ena_rx_ctx->l3_proto = (enum ena_eth_io_l3_proto_index)(cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK);
+ ena_rx_ctx->l4_proto = (enum ena_eth_io_l4_proto_index)
+ ((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
+ ena_rx_ctx->l3_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ ena_rx_ctx->l4_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ ena_rx_ctx->hash = cdesc->hash;
+ ena_rx_ctx->frag =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+
+ ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+ ena_rx_ctx->l3_proto,
+ ena_rx_ctx->l4_proto,
+ ena_rx_ctx->l3_csum_err,
+ ena_rx_ctx->l4_csum_err,
+ ena_rx_ctx->hash,
+ ena_rx_ctx->frag,
+ cdesc->status);
+}
+
+/*****************************************************************************/
+/***************************** API **********************************/
+/*****************************************************************************/
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc)
+{
+ struct ena_eth_io_tx_desc *desc = NULL;
+ struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
+ void *push_header = ena_tx_ctx->push_header;
+ u16 header_len = ena_tx_ctx->header_len;
+ u16 num_bufs = ena_tx_ctx->num_bufs;
+ int total_desc, i, rc;
+ bool have_meta;
+ u64 addr_hi;
+
+ ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX,
+ "wrong Q type");
+
+	/* num_bufs + 1 for a potential meta desc */
+ if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
+ ena_trc_err("Not enough space in the tx queue\n");
+ return ENA_COM_NO_MEM;
+ }
+
+ if (unlikely(header_len > io_sq->tx_max_header_size)) {
+ ena_trc_err("header size is too large %d max header: %d\n",
+ header_len, io_sq->tx_max_header_size);
+ return ENA_COM_INVAL;
+ }
+
+ /* start with pushing the header (if needed) */
+ rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely(rc))
+ return rc;
+
+ have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
+ ena_tx_ctx);
+ if (have_meta)
+ ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+
+	/* If the caller doesn't want to send packets */
+ if (unlikely(!num_bufs && !header_len)) {
+ *nb_hw_desc = have_meta ? 0 : 1;
+ return 0;
+ }
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ /* Set first desc when we don't have meta descriptor */
+ if (!have_meta)
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
+
+ desc->buff_addr_hi_hdr_sz |= (header_len <<
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+ desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+
+ /* Bits 0-9 */
+ desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+
+ desc->meta_ctrl |= (ena_tx_ctx->df <<
+ ENA_ETH_IO_TX_DESC_DF_SHIFT) &
+ ENA_ETH_IO_TX_DESC_DF_MASK;
+
+ /* Bits 10-15 */
+ desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+
+ if (ena_tx_ctx->meta_valid) {
+ desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
+ ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+ desc->meta_ctrl |= ena_tx_ctx->l3_proto &
+ ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+		/* The first buffer shares its descriptor with the header */
+ if (likely(i != 0)) {
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+ }
+
+ desc->len_ctrl |= ena_bufs->len &
+ ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+
+ addr_hi = ((ena_bufs->paddr &
+ GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ desc->buff_addr_lo = (u32)ena_bufs->paddr;
+ desc->buff_addr_hi_hdr_sz |= addr_hi &
+ ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+ ena_bufs++;
+ }
+
+ /* set the last desc indicator */
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+
+ ena_com_sq_update_tail(io_sq);
+
+ total_desc = ENA_MAX16(num_bufs, 1);
+ total_desc += have_meta ? 1 : 0;
+
+ *nb_hw_desc = total_desc;
+ return 0;
+}
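+
+/*
+ * Illustrative TX call sequence (a sketch, not part of the driver): the
+ * caller fills an ena_com_tx_ctx per packet and calls ena_com_prepare_tx(),
+ * rings the doorbell once per burst with ena_com_write_sq_doorbell(), and
+ * later reclaims finished req_ids with ena_com_tx_comp_req_id_get().
+ */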
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx)
+{
+ struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
+ struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ u16 cdesc_idx = 0;
+ u16 nb_hw_desc;
+ u16 i;
+ int rc;
+
+ ENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
+
+ rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
+ if (rc || (nb_hw_desc == 0)) {
+ ena_rx_ctx->descs = nb_hw_desc;
+ return rc;
+ }
+
+ ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
+ io_cq->qid, nb_hw_desc);
+
+ if (unlikely(nb_hw_desc >= ena_rx_ctx->max_bufs)) {
+ ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
+ nb_hw_desc, ena_rx_ctx->max_bufs);
+ return ENA_COM_NO_SPACE;
+ }
+
+ for (i = 0; i < nb_hw_desc; i++) {
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+
+ ena_buf->len = cdesc->length;
+ ena_buf->req_id = cdesc->req_id;
+ ena_buf++;
+ }
+
+ /* Update SQ head ptr */
+ io_sq->next_to_comp += nb_hw_desc;
+
+ ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+ io_sq->qid, io_sq->next_to_comp);
+
+ /* Get rx flags from the last pkt */
+ ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+}
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id)
+{
+ struct ena_eth_io_rx_desc *desc;
+
+ ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
+
+ if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ return -1;
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
+
+ desc->length = ena_buf->len;
+
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
+ desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+
+ desc->req_id = req_id;
+
+ desc->buff_addr_lo = (u32)ena_buf->paddr;
+ desc->buff_addr_hi =
+ ((ena_buf->paddr &
+ GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ ena_com_sq_update_tail(io_sq);
+
+ return 0;
+}
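+
+/*
+ * Illustrative RX refill sequence (a sketch, not part of the driver): the
+ * caller posts one ena_com_buf per free mbuf with
+ * ena_com_add_single_rx_desc() and then notifies the device once per burst
+ * with ena_com_write_sq_doorbell(), as the PMD does when populating its
+ * RX queues.
+ */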
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((unsigned char *)io_cq->cdesc_addr.virt_addr
+ + (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+ cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return -1;
+
+ ena_com_cq_inc_head(io_cq);
+
+ *req_id = cdesc->req_id;
+
+ return 0;
+}
diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h
new file mode 100644
index 00000000..325d69c0
--- /dev/null
+++ b/drivers/net/ena/base/ena_eth_com.h
@@ -0,0 +1,153 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef ENA_ETH_COM_H_
+#define ENA_ETH_COM_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+#include "ena_com.h"
+
+/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
+#define ENA_COMP_HEAD_THRESH 4
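+/* e.g. with q_depth = 1024 the CQ head doorbell is written only after more
+ * than 1024 / ENA_COMP_HEAD_THRESH = 256 unreported completions, see
+ * ena_com_update_dev_comp_head() below
+ */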
+
+struct ena_com_tx_ctx {
+ struct ena_com_tx_meta ena_meta;
+ struct ena_com_buf *ena_bufs;
+ /* For LLQ, header buffer - pushed to the device mem space */
+ void *push_header;
+
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ u16 num_bufs;
+ u16 req_id;
+ /* For regular queue, indicate the size of the header
+ * For LLQ, indicate the size of the pushed buffer
+ */
+ u16 header_len;
+
+ u8 meta_valid;
+ u8 tso_enable;
+ u8 l3_csum_enable;
+ u8 l4_csum_enable;
+ u8 l4_csum_partial;
+ u8 df; /* Don't fragment */
+};
+
+struct ena_com_rx_ctx {
+ struct ena_com_rx_buf_info *ena_bufs;
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ bool l3_csum_err;
+ bool l4_csum_err;
+ /* fragmented packet */
+ bool frag;
+ u32 hash;
+ u16 descs;
+ int max_bufs;
+};
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc);
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx);
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id);
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+
+static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
+ struct ena_eth_io_intr_reg *intr_reg)
+{
+ ENA_REG_WRITE32(intr_reg->intr_control, io_cq->unmask_reg);
+}
+
+static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+{
+ u16 tail, next_to_comp, cnt;
+
+ next_to_comp = io_sq->next_to_comp;
+ tail = io_sq->tail;
+ cnt = tail - next_to_comp;
+
+ return io_sq->q_depth - 1 - cnt;
+}
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail;
+
+ tail = io_sq->tail;
+
+ ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
+ io_sq->qid, tail);
+
+ ENA_REG_WRITE32(tail, io_sq->db_addr);
+
+ return 0;
+}
+
+static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
+{
+ u16 unreported_comp, head;
+ bool need_update;
+
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+ if (io_cq->cq_head_db_reg && need_update) {
+ ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ ENA_REG_WRITE32(head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
+
+ return 0;
+}
+
+static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
+{
+ io_sq->next_to_comp += elem;
+}
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* ENA_ETH_COM_H_ */
diff --git a/drivers/net/ena/base/ena_plat.h b/drivers/net/ena/base/ena_plat.h
new file mode 100644
index 00000000..b5b64545
--- /dev/null
+++ b/drivers/net/ena/base/ena_plat.h
@@ -0,0 +1,53 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef ENA_PLAT_H_
+#define ENA_PLAT_H_
+
+#if defined(ENA_IPXE)
+#include "ena_plat_ipxe.h"
+#elif defined(__linux__)
+#if defined(__KERNEL__)
+#include "ena_plat_linux.h"
+#else
+#include "ena_plat_dpdk.h"
+#endif
+#elif defined(__FreeBSD__)
+#include "ena_plat_dpdk.h"
+#elif defined(_WIN32)
+#include "ena_plat_windows.h"
+#else
+#error "Invalid platform"
+#endif
+
+#endif /* ENA_PLAT_H_ */
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
new file mode 100644
index 00000000..aab2ac86
--- /dev/null
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -0,0 +1,220 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef DPDK_ENA_COM_ENA_PLAT_DPDK_H_
+#define DPDK_ENA_COM_ENA_PLAT_DPDK_H_
+
+#include <stdbool.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_spinlock.h>
+
+#include <sys/time.h>
+
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+
+typedef uint64_t dma_addr_t;
+#ifndef ETIME
+#define ETIME ETIMEDOUT
+#endif
+
+#define ena_atomic32_t rte_atomic32_t
+#define ena_mem_handle_t void *
+
+#define SZ_256 (256)
+#define SZ_4K (4096)
+
+#define ENA_COM_OK 0
+#define ENA_COM_NO_MEM -ENOMEM
+#define ENA_COM_INVAL -EINVAL
+#define ENA_COM_NO_SPACE -ENOSPC
+#define ENA_COM_NO_DEVICE -ENODEV
+#define ENA_COM_PERMISSION -EPERM
+#define ENA_COM_TIMER_EXPIRED -ETIME
+#define ENA_COM_FAULT -EFAULT
+
+#define ____cacheline_aligned __rte_cache_aligned
+
+#define ENA_ABORT() abort()
+
+#define ENA_MSLEEP(x) rte_delay_ms(x)
+#define ENA_UDELAY(x) rte_delay_us(x)
+
+#define memcpy_toio memcpy
+#define wmb rte_wmb
+#define rmb rte_rmb
+#define mb rte_mb
+#define __iomem
+
+#define US_PER_S 1000000
+#define ENA_GET_SYSTEM_USECS() \
+ (rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())
+
+#define ENA_ASSERT(cond, format, arg...) \
+ do { \
+ if (unlikely(!(cond))) { \
+ printf("Assertion failed on %s:%s:%d: " format, \
+ __FILE__, __func__, __LINE__, ##arg); \
+ rte_exit(EXIT_FAILURE, "ASSERTION FAILED\n"); \
+ } \
+ } while (0)
+
+#define ENA_MAX32(x, y) RTE_MAX((x), (y))
+#define ENA_MAX16(x, y) RTE_MAX((x), (y))
+#define ENA_MAX8(x, y) RTE_MAX((x), (y))
+#define ENA_MIN32(x, y) RTE_MIN((x), (y))
+#define ENA_MIN16(x, y) RTE_MIN((x), (y))
+#define ENA_MIN8(x, y) RTE_MIN((x), (y))
+
+#define U64_C(x) x ## ULL
+#define BIT(nr) (1UL << (nr))
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
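+/* For example, GENMASK_ULL(47, 32) evaluates to 0x0000ffff00000000ULL; the
+ * I/O path uses such masks to extract the high DMA address bits, e.g. in
+ * ena_com_prepare_tx() and ena_com_add_single_rx_desc().
+ */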
+
+#ifdef RTE_LIBRTE_ENA_COM_DEBUG
+#define ena_trc_dbg(format, arg...) \
+ RTE_LOG(DEBUG, PMD, "[ENA_COM: %s] " format, __func__, ##arg)
+#define ena_trc_info(format, arg...) \
+ RTE_LOG(INFO, PMD, "[ENA_COM: %s] " format, __func__, ##arg)
+#define ena_trc_warn(format, arg...) \
+	RTE_LOG(WARNING, PMD, "[ENA_COM: %s] " format, __func__, ##arg)
+#define ena_trc_err(format, arg...) \
+ RTE_LOG(ERR, PMD, "[ENA_COM: %s] " format, __func__, ##arg)
+#else
+#define ena_trc_dbg(format, arg...) do { } while (0)
+#define ena_trc_info(format, arg...) do { } while (0)
+#define ena_trc_warn(format, arg...) do { } while (0)
+#define ena_trc_err(format, arg...) do { } while (0)
+#endif /* RTE_LIBRTE_ENA_COM_DEBUG */
+
+/* Spinlock related methods */
+#define ena_spinlock_t rte_spinlock_t
+#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock)
+#define ENA_SPINLOCK_LOCK(spinlock, flags) \
+ ({(void)flags; rte_spinlock_lock(&spinlock); })
+#define ENA_SPINLOCK_UNLOCK(spinlock, flags) \
+ ({(void)flags; rte_spinlock_unlock(&(spinlock)); })
+
+#define q_waitqueue_t \
+ struct { \
+ pthread_cond_t cond; \
+ pthread_mutex_t mutex; \
+ }
+
+#define ena_wait_queue_t q_waitqueue_t
+
+#define ENA_WAIT_EVENT_INIT(waitqueue) \
+ do { \
+ pthread_mutex_init(&(waitqueue).mutex, NULL); \
+ pthread_cond_init(&(waitqueue).cond, NULL); \
+ } while (0)
+
+#define ENA_WAIT_EVENT_WAIT(waitevent, timeout) \
+ do { \
+ struct timespec wait; \
+ struct timeval now; \
+ unsigned long timeout_us; \
+ gettimeofday(&now, NULL); \
+ wait.tv_sec = now.tv_sec + timeout / 1000000UL; \
+ timeout_us = timeout % 1000000UL; \
+ wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \
+ pthread_mutex_lock(&waitevent.mutex); \
+ pthread_cond_timedwait(&waitevent.cond, \
+ &waitevent.mutex, &wait); \
+ pthread_mutex_unlock(&waitevent.mutex); \
+ } while (0)
+#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond)
+/* pthread condition doesn't need to be rearmed after usage */
+#define ENA_WAIT_EVENT_CLEAR(...)
+
+#define ena_wait_event_t ena_wait_queue_t
+#define ENA_MIGHT_SLEEP()
+
+#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \
+ do { \
+ const struct rte_memzone *mz; \
+ char z_name[RTE_MEMZONE_NAMESIZE]; \
+ (void)dmadev; (void)handle; \
+ snprintf(z_name, sizeof(z_name), \
+ "ena_alloc_%d", ena_alloc_cnt++); \
+ mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
+ virt = mz->addr; \
+ phys = mz->phys_addr; \
+ } while (0)
+#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
+ ({(void)size; rte_free(virt); })
+#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
+#define ENA_MEM_FREE(dmadev, ptr) ({(void)dmadev; rte_free(ptr); })
+
+static inline void writel(u32 value, volatile void *addr)
+{
+ *(volatile u32 *)addr = value;
+}
+
+static inline u32 readl(const volatile void *addr)
+{
+ return *(const volatile u32 *)addr;
+}
+
+#define ENA_REG_WRITE32(value, reg) writel((value), (reg))
+#define ENA_REG_READ32(reg) readl((reg))
+
+#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
+#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr)
+#define ATOMIC32_SET(i32_ptr, val) rte_atomic32_set(i32_ptr, val)
+#define ATOMIC32_READ(i32_ptr) rte_atomic32_read(i32_ptr)
+
+#define msleep(x) rte_delay_us(x * 1000)
+#define udelay(x) rte_delay_us(x)
+
+#define MAX_ERRNO 4095
+#define IS_ERR(x) (((unsigned long)x) >= (unsigned long)-MAX_ERRNO)
+#define ERR_PTR(error) ((void *)(long)error)
+#define PTR_ERR(error) ((long)(void *)error)
+#define might_sleep()
+
+#endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
new file mode 100644
index 00000000..02af67a2
--- /dev/null
+++ b/drivers/net/ena/ena_ethdev.c
@@ -0,0 +1,1455 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_tcp.h>
+#include <rte_atomic.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+
+#include "ena_ethdev.h"
+#include "ena_logs.h"
+#include "ena_platform.h"
+#include "ena_com.h"
+#include "ena_eth_com.h"
+
+#include <ena_common_defs.h>
+#include <ena_regs_defs.h>
+#include <ena_admin_defs.h>
+#include <ena_eth_io_defs.h>
+
+#define ENA_IO_TXQ_IDX(q) (2 * (q))
+#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
+/* reverse version of ENA_IO_RXQ_IDX */
+#define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2)
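+/* e.g. user queue 3 maps to TX I/O qid 6 and RX I/O qid 7, and
+ * ENA_IO_RXQ_IDX_REV(7) maps back to user queue 3
+ */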
+
+/* While processing submitted and completed descriptors in a loop
+ * (RX and TX path respectively) it is desirable to:
+ * - perform batch submissions while populating the submission queue
+ * - avoid blocking transmission of other packets during the cleanup phase
+ * Hence the utilization ratio of 1/8 of the queue size.
+ */
+#define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8)
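+/* e.g. for a 256-descriptor ring this limits a single burst to
+ * 256 / 8 = 32 descriptors
+ */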
+
+#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
+#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))
+
+#define GET_L4_HDR_LEN(mbuf) \
+ ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \
+ mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
+
+#define ENA_RX_RSS_TABLE_LOG_SIZE 7
+#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
+#define ENA_HASH_KEY_SIZE 40
+
+/** Vendor ID used by Amazon devices */
+#define PCI_VENDOR_ID_AMAZON 0x1D0F
+/** Amazon devices */
+#define PCI_DEVICE_ID_ENA_VF 0xEC20
+#define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21
+
+static struct rte_pci_id pci_id_ena_map[] = {
+#define RTE_PCI_DEV_ID_DECL_ENA(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+
+ RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF)
+ RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF)
+ {.device_id = 0},
+};
+
+static int ena_device_init(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
+static int ena_dev_configure(struct rte_eth_dev *dev);
+static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+static uint16_t eth_ena_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
+static void ena_init_rings(struct ena_adapter *adapter);
+static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int ena_start(struct rte_eth_dev *dev);
+static void ena_close(struct rte_eth_dev *dev);
+static void ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
+static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
+static void ena_rx_queue_release(void *queue);
+static void ena_tx_queue_release(void *queue);
+static void ena_rx_queue_release_bufs(struct ena_ring *ring);
+static void ena_tx_queue_release_bufs(struct ena_ring *ring);
+static int ena_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+static int ena_queue_restart(struct ena_ring *ring);
+static int ena_queue_restart_all(struct rte_eth_dev *dev,
+ enum ena_ring_type ring_type);
+static void ena_stats_restart(struct rte_eth_dev *dev);
+static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int ena_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int ena_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+static struct eth_dev_ops ena_dev_ops = {
+ .dev_configure = ena_dev_configure,
+ .dev_infos_get = ena_infos_get,
+ .rx_queue_setup = ena_rx_queue_setup,
+ .tx_queue_setup = ena_tx_queue_setup,
+ .dev_start = ena_start,
+ .link_update = ena_link_update,
+ .stats_get = ena_stats_get,
+ .mtu_set = ena_mtu_set,
+ .rx_queue_release = ena_rx_queue_release,
+ .tx_queue_release = ena_tx_queue_release,
+ .dev_close = ena_close,
+ .reta_update = ena_rss_reta_update,
+ .reta_query = ena_rss_reta_query,
+};
+
+static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
+ struct ena_com_rx_ctx *ena_rx_ctx)
+{
+ uint64_t ol_flags = 0;
+
+ if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
+ ol_flags |= PKT_TX_TCP_CKSUM;
+ else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
+ ol_flags |= PKT_TX_UDP_CKSUM;
+
+ if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
+ ol_flags |= PKT_TX_IPV4;
+ else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
+ ol_flags |= PKT_TX_IPV6;
+
+ if (unlikely(ena_rx_ctx->l4_csum_err))
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ if (unlikely(ena_rx_ctx->l3_csum_err))
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ mbuf->ol_flags = ol_flags;
+}
+
+static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+ if (mbuf->ol_flags &
+ (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
+ /* check if TSO is required */
+ if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
+ ena_tx_ctx->tso_enable = true;
+
+ ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
+ }
+
+ /* check if L3 checksum is needed */
+ if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+ ena_tx_ctx->l3_csum_enable = true;
+
+ if (mbuf->ol_flags & PKT_TX_IPV6) {
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
+ } else {
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
+
+ /* set don't fragment (DF) flag */
+ if (mbuf->packet_type &
+ (RTE_PTYPE_L4_NONFRAG
+ | RTE_PTYPE_INNER_L4_NONFRAG))
+ ena_tx_ctx->df = true;
+ }
+
+ /* check if L4 checksum is needed */
+ switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
+ ena_tx_ctx->l4_csum_enable = true;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
+ ena_tx_ctx->l4_csum_enable = true;
+ break;
+ default:
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
+ ena_tx_ctx->l4_csum_enable = false;
+ break;
+ }
+
+ ena_meta->mss = mbuf->tso_segsz;
+ ena_meta->l3_hdr_len = mbuf->l3_len;
+ ena_meta->l3_hdr_offset = mbuf->l2_len;
+		/* these params are needed only for TSO */
+ ena_meta->l3_outer_hdr_len = 0;
+ ena_meta->l3_outer_hdr_offset = 0;
+
+ ena_tx_ctx->meta_valid = true;
+ } else {
+ ena_tx_ctx->meta_valid = false;
+ }
+}
+
+static void ena_close(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ adapter->state = ENA_ADAPTER_STATE_STOPPED;
+
+ ena_rx_queue_release_all(dev);
+ ena_tx_queue_release_all(dev);
+}
+
+static int ena_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ int ret, i;
+ u16 entry_value;
+ int conf_idx;
+ int idx;
+
+ if ((reta_size == 0) || (reta_conf == NULL))
+ return -EINVAL;
+
+ if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
+ RTE_LOG(WARNING, PMD,
+			"indirection table size %d is bigger than supported (%d)\n",
+ reta_size, ENA_RX_RSS_TABLE_SIZE);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0 ; i < reta_size ; i++) {
+		/* Each reta_conf group covers 64 entries,
+		 * so 128 entries are described by two groups of 64.
+		 */
+ conf_idx = i / RTE_RETA_GROUP_SIZE;
+ idx = i % RTE_RETA_GROUP_SIZE;
+ if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
+ entry_value =
+ ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
+ ret = ena_com_indirect_table_fill_entry(ena_dev,
+ i,
+ entry_value);
+ if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ RTE_LOG(ERR, PMD,
+ "Cannot fill indirect table\n");
+ ret = -ENOTSUP;
+ goto err;
+ }
+ }
+ }
+
+ ret = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
+ ret = -ENOTSUP;
+ goto err;
+ }
+
+ RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
+ __func__, reta_size, adapter->rte_dev->data->port_id);
+err:
+ return ret;
+}
+
+/* Query redirection table. */
+static int ena_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ int ret;
+ int i;
+ u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
+ int reta_conf_idx;
+ int reta_idx;
+
+ if (reta_size == 0 || reta_conf == NULL ||
+ (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
+ return -EINVAL;
+
+ ret = ena_com_indirect_table_get(ena_dev, indirect_table);
+ if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ RTE_LOG(ERR, PMD, "cannot get indirect table\n");
+ ret = -ENOTSUP;
+ goto err;
+ }
+
+ for (i = 0 ; i < reta_size ; i++) {
+ reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_RETA_GROUP_SIZE;
+ if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
+ reta_conf[reta_conf_idx].reta[reta_idx] =
+ ENA_IO_RXQ_IDX_REV(indirect_table[i]);
+ }
+err:
+ return ret;
+}
+
+static int ena_rss_init_default(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
+ int rc, i;
+ u32 val;
+
+ rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
+ if (unlikely(rc)) {
+ RTE_LOG(ERR, PMD, "Cannot init indirect table\n");
+ goto err_rss_init;
+ }
+
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ val = i % nb_rx_queues;
+ rc = ena_com_indirect_table_fill_entry(ena_dev, i,
+ ENA_IO_RXQ_IDX(val));
+ if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
+ goto err_fill_indir;
+ }
+ }
+
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
+ ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
+ goto err_fill_indir;
+ }
+
+ rc = ena_com_set_default_hash_ctrl(ena_dev);
+ if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
+ goto err_fill_indir;
+ }
+
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
+ goto err_fill_indir;
+ }
+ RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n",
+ adapter->rte_dev->data->port_id);
+
+ return 0;
+
+err_fill_indir:
+ ena_com_rss_destroy(ena_dev);
+err_rss_init:
+
+ return rc;
+}
+
+static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
+{
+ struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
+ int nb_queues = dev->data->nb_rx_queues;
+ int i;
+
+ for (i = 0; i < nb_queues; i++)
+ ena_rx_queue_release(queues[i]);
+}
+
+static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
+{
+ struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
+ int nb_queues = dev->data->nb_tx_queues;
+ int i;
+
+ for (i = 0; i < nb_queues; i++)
+ ena_tx_queue_release(queues[i]);
+}
+
+static void ena_rx_queue_release(void *queue)
+{
+ struct ena_ring *ring = (struct ena_ring *)queue;
+ struct ena_adapter *adapter = ring->adapter;
+ int ena_qid;
+
+ ena_assert_msg(ring->configured,
+		       "API violation - releasing a queue that was not configured");
+ ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
+ "API violation");
+
+ /* Destroy HW queue */
+ ena_qid = ENA_IO_RXQ_IDX(ring->id);
+ ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
+
+ /* Free all bufs */
+ ena_rx_queue_release_bufs(ring);
+
+ /* Free ring resources */
+ if (ring->rx_buffer_info)
+ rte_free(ring->rx_buffer_info);
+ ring->rx_buffer_info = NULL;
+
+ ring->configured = 0;
+
+ RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
+ ring->port_id, ring->id);
+}
+
+static void ena_tx_queue_release(void *queue)
+{
+ struct ena_ring *ring = (struct ena_ring *)queue;
+ struct ena_adapter *adapter = ring->adapter;
+ int ena_qid;
+
+ ena_assert_msg(ring->configured,
+		       "API violation. Releasing a queue that was not configured");
+ ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
+ "API violation");
+
+ /* Destroy HW queue */
+ ena_qid = ENA_IO_TXQ_IDX(ring->id);
+ ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
+
+ /* Free all bufs */
+ ena_tx_queue_release_bufs(ring);
+
+ /* Free ring resources */
+ if (ring->tx_buffer_info)
+ rte_free(ring->tx_buffer_info);
+
+ if (ring->empty_tx_reqs)
+ rte_free(ring->empty_tx_reqs);
+
+ ring->empty_tx_reqs = NULL;
+ ring->tx_buffer_info = NULL;
+
+ ring->configured = 0;
+
+ RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n",
+ ring->port_id, ring->id);
+}
+
+static void ena_rx_queue_release_bufs(struct ena_ring *ring)
+{
+ unsigned int ring_mask = ring->ring_size - 1;
+
+ while (ring->next_to_clean != ring->next_to_use) {
+ struct rte_mbuf *m =
+ ring->rx_buffer_info[ring->next_to_clean & ring_mask];
+
+ if (m)
+ __rte_mbuf_raw_free(m);
+
+ ring->next_to_clean =
+ ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+ }
+}
+
+static void ena_tx_queue_release_bufs(struct ena_ring *ring)
+{
+ unsigned int ring_mask = ring->ring_size - 1;
+
+ while (ring->next_to_clean != ring->next_to_use) {
+ struct ena_tx_buffer *tx_buf =
+ &ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+
+ if (tx_buf->mbuf)
+ rte_pktmbuf_free(tx_buf->mbuf);
+
+ ring->next_to_clean =
+ ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+ }
+}
+
+static int ena_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link *link = &dev->data->dev_link;
+
+ link->link_status = 1;
+ link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ return 0;
+}
+
+static int ena_queue_restart_all(struct rte_eth_dev *dev,
+ enum ena_ring_type ring_type)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_ring *queues = NULL;
+ int i = 0;
+ int rc = 0;
+
+ queues = (ring_type == ENA_RING_TYPE_RX) ?
+ adapter->rx_ring : adapter->tx_ring;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (queues[i].configured) {
+ if (ring_type == ENA_RING_TYPE_RX) {
+ ena_assert_msg(
+ dev->data->rx_queues[i] == &queues[i],
+ "Inconsistent state of rx queues\n");
+ } else {
+ ena_assert_msg(
+ dev->data->tx_queues[i] == &queues[i],
+ "Inconsistent state of tx queues\n");
+ }
+
+ rc = ena_queue_restart(&queues[i]);
+
+ if (rc) {
+ PMD_INIT_LOG(ERR,
+ "failed to restart queue %d type(%d)\n",
+ i, ring_type);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
+{
+ uint32_t max_frame_len = adapter->max_mtu;
+
+ if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)
+ max_frame_len =
+ adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;
+
+ return max_frame_len;
+}
+
+static int ena_check_valid_conf(struct ena_adapter *adapter)
+{
+ uint32_t max_frame_len = ena_get_mtu_conf(adapter);
+
+ if (max_frame_len > adapter->max_mtu) {
+ PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+ena_calc_queue_size(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
+
+ queue_size = RTE_MIN(queue_size,
+ get_feat_ctx->max_queues.max_cq_depth);
+ queue_size = RTE_MIN(queue_size,
+ get_feat_ctx->max_queues.max_sq_depth);
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ queue_size = RTE_MIN(queue_size,
+ get_feat_ctx->max_queues.max_llq_depth);
+
+ /* Round down to power of 2 */
+ if (!rte_is_power_of_2(queue_size))
+ queue_size = rte_align32pow2(queue_size >> 1);
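+	/* e.g. a supported depth of 1000 becomes rte_align32pow2(500) = 512 */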
+
+ if (queue_size == 0) {
+ PMD_INIT_LOG(ERR, "Invalid queue size\n");
+ return -EFAULT;
+ }
+
+ return queue_size;
+}
+
+static void ena_stats_restart(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ rte_atomic64_init(&adapter->drv_stats->ierrors);
+ rte_atomic64_init(&adapter->drv_stats->oerrors);
+ rte_atomic64_init(&adapter->drv_stats->imcasts);
+ rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
+}
+
+static void ena_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct ena_admin_basic_stats ena_stats;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ int rc;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ memset(&ena_stats, 0, sizeof(ena_stats));
+ rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
+ if (unlikely(rc)) {
+		RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA\n");
+ return;
+ }
+
+ /* Set of basic statistics from ENA */
+ stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
+ ena_stats.rx_pkts_low);
+ stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
+ ena_stats.tx_pkts_low);
+ stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
+ ena_stats.rx_bytes_low);
+ stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
+ ena_stats.tx_bytes_low);
+ stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,
+ ena_stats.rx_drops_low);
+
+ /* Driver related stats */
+ stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
+ stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
+ stats->imcasts = rte_atomic64_read(&adapter->drv_stats->imcasts);
+ stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
+}
+
+static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ena_adapter *adapter;
+ struct ena_com_dev *ena_dev;
+ int rc = 0;
+
+ ena_assert_msg(dev->data != NULL, "Uninitialized device");
+ ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+
+ ena_dev = &adapter->ena_dev;
+ ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+
+ if (mtu > ena_get_mtu_conf(adapter)) {
+ RTE_LOG(ERR, PMD,
+ "Given MTU (%d) exceeds maximum MTU supported (%d)\n",
+ mtu, ena_get_mtu_conf(adapter));
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = ena_com_set_dev_mtu(ena_dev, mtu);
+ if (rc)
+ RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu);
+ else
+ RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);
+
+err:
+ return rc;
+}
+
+static int ena_start(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ int rc = 0;
+
+ if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG ||
+ adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
+ PMD_INIT_LOG(ERR, "API violation");
+ return -1;
+ }
+
+ rc = ena_check_valid_conf(adapter);
+ if (rc)
+ return rc;
+
+ rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);
+ if (rc)
+ return rc;
+
+ rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);
+ if (rc)
+ return rc;
+
+ if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
+ ETH_MQ_RX_RSS_FLAG) {
+ rc = ena_rss_init_default(adapter);
+ if (rc)
+ return rc;
+ }
+
+ ena_stats_restart(dev);
+
+ adapter->state = ENA_ADAPTER_STATE_RUNNING;
+
+ return 0;
+}
+
+static int ena_queue_restart(struct ena_ring *ring)
+{
+ int rc;
+
+ ena_assert_msg(ring->configured == 1,
+ "Trying to restart unconfigured queue\n");
+
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ if (ring->type == ENA_RING_TYPE_TX)
+ return 0;
+
+ rc = ena_populate_rx_queue(ring, ring->ring_size - 1);
+ if ((unsigned int)rc != ring->ring_size - 1) {
+		PMD_INIT_LOG(ERR, "Failed to populate rx ring!\n");
+ return (-1);
+ }
+
+ return 0;
+}
+
+static int ena_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ struct ena_ring *txq = NULL;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ unsigned int i;
+ int ena_qid;
+ int rc;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ txq = &adapter->tx_ring[queue_idx];
+
+ if (txq->configured) {
+ RTE_LOG(CRIT, PMD,
+ "API violation. Queue %d is already configured\n",
+ queue_idx);
+ return -1;
+ }
+
+ if (nb_desc > adapter->tx_ring_size) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported size of TX queue (max size: %d)\n",
+ adapter->tx_ring_size);
+ return -EINVAL;
+ }
+
+ ena_qid = ENA_IO_TXQ_IDX(queue_idx);
+ rc = ena_com_create_io_queue(ena_dev, ena_qid,
+ ENA_COM_IO_QUEUE_DIRECTION_TX,
+ ena_dev->tx_mem_queue_type,
+				     -1 /* admin interrupts are not used */,
+ nb_desc);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "failed to create io TX queue #%d (qid:%d) rc: %d\n",
+ queue_idx, ena_qid, rc);
+ }
+ txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
+ txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
+
+ txq->port_id = dev->data->port_id;
+ txq->next_to_clean = 0;
+ txq->next_to_use = 0;
+ txq->ring_size = nb_desc;
+
+ txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
+ sizeof(struct ena_tx_buffer) *
+ txq->ring_size,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->tx_buffer_info) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
+ return -ENOMEM;
+ }
+
+ txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
+ sizeof(u16) * txq->ring_size,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->empty_tx_reqs) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
+ rte_free(txq->tx_buffer_info);
+ return -ENOMEM;
+ }
+ for (i = 0; i < txq->ring_size; i++)
+ txq->empty_tx_reqs[i] = i;
+
+ /* Store pointer to this queue in upper layer */
+ txq->configured = 1;
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return rc;
+}
+
+static int ena_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+ struct ena_ring *rxq = NULL;
+ uint16_t ena_qid = 0;
+ int rc = 0;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ rxq = &adapter->rx_ring[queue_idx];
+ if (rxq->configured) {
+ RTE_LOG(CRIT, PMD,
+ "API violation. Queue %d is already configured\n",
+ queue_idx);
+ return -1;
+ }
+
+ if (nb_desc > adapter->rx_ring_size) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported size of RX queue (max size: %d)\n",
+ adapter->rx_ring_size);
+ return -EINVAL;
+ }
+
+ ena_qid = ENA_IO_RXQ_IDX(queue_idx);
+ rc = ena_com_create_io_queue(ena_dev, ena_qid,
+ ENA_COM_IO_QUEUE_DIRECTION_RX,
+ ENA_ADMIN_PLACEMENT_POLICY_HOST,
+ -1 /* admin interrupts not used */,
+ nb_desc);
+ if (rc)
+ RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
+ queue_idx, rc);
+
+ rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
+ rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
+
+ rxq->port_id = dev->data->port_id;
+ rxq->next_to_clean = 0;
+ rxq->next_to_use = 0;
+ rxq->ring_size = nb_desc;
+ rxq->mb_pool = mp;
+
+ rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->rx_buffer_info) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
+ return -ENOMEM;
+ }
+
+ /* Store pointer to this queue in upper layer */
+ rxq->configured = 1;
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ return rc;
+}
+
+static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
+{
+ unsigned int i;
+ int rc;
+ unsigned int ring_size = rxq->ring_size;
+ unsigned int ring_mask = ring_size - 1;
+ int next_to_use = rxq->next_to_use & ring_mask;
+ struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+
+ if (unlikely(!count))
+ return 0;
+
+ ena_assert_msg((((ENA_CIRC_COUNT(rxq->next_to_use, rxq->next_to_clean,
+ rxq->ring_size)) +
+ count) < rxq->ring_size), "bad ring state");
+
+ count = RTE_MIN(count, ring_size - next_to_use);
+
+ /* get resources for incoming packets */
+ rc = rte_mempool_get_bulk(rxq->mb_pool,
+ (void **)(&mbufs[next_to_use]), count);
+ if (unlikely(rc < 0)) {
+ rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
+		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
+ return 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct rte_mbuf *mbuf = mbufs[next_to_use];
+ struct ena_com_buf ebuf;
+
+ rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+ /* prepare physical address for DMA transaction */
+ ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+ /* pass resource to device */
+ rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
+ &ebuf, next_to_use);
+ if (unlikely(rc)) {
+ RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
+ break;
+ }
+ next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, ring_size);
+ }
+
+ rte_wmb();
+ rxq->next_to_use = next_to_use;
+ /* let HW know that it can fill buffers with data */
+ ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
+
+ return i;
+}
+
+static int ena_device_init(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ int rc;
+
+ /* Initialize mmio registers */
+ rc = ena_com_mmio_reg_read_request_init(ena_dev);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "failed to init mmio read less\n");
+ return rc;
+ }
+
+ /* reset device */
+ rc = ena_com_dev_reset(ena_dev);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "cannot reset device\n");
+ goto err_mmio_read_less;
+ }
+
+ /* check FW version */
+ rc = ena_com_validate_version(ena_dev);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "device version is too low\n");
+ goto err_mmio_read_less;
+ }
+
+ ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
+
+ /* ENA device administration layer init */
+ rc = ena_com_admin_init(ena_dev, NULL, true);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "cannot initialize ena admin queue with device\n");
+ goto err_mmio_read_less;
+ }
+
+ /* To enable MSI-X interrupts the driver needs to know the number of
+ * queues, so it uses polling mode to retrieve this information.
+ */
+ ena_com_set_admin_polling_mode(ena_dev, true);
+
+ /* Get Device Attributes and features */
+ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "cannot get attributes for ena device, rc: %d\n", rc);
+ goto err_admin_init;
+ }
+
+ return 0;
+
+err_admin_init:
+ ena_com_admin_destroy(ena_dev);
+
+err_mmio_read_less:
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ return rc;
+}
+
+static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(eth_dev->data->dev_private);
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ int queue_size, rc;
+
+ static int adapters_found;
+
+ memset(adapter, 0, sizeof(struct ena_adapter));
+ ena_dev = &adapter->ena_dev;
+
+ eth_dev->dev_ops = &ena_dev_ops;
+ eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
+ adapter->rte_eth_dev_data = eth_dev->data;
+ adapter->rte_dev = eth_dev;
+
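+ /* Secondary processes reuse the device data set up by the primary
+ * process, so only the burst function pointers above need to be set.
+ */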
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = eth_dev->pci_dev;
+ adapter->pdev = pci_dev;
+
+ PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
+ adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
+
+ /* A present ENA_MEM_BAR indicates that LLQ mode is available.
+ * Use the corresponding placement policy.
+ */
+ if (adapter->dev_mem_base)
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+ else if (adapter->regs)
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ else
+ PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
+ ENA_REGS_BAR);
+
+ ena_dev->reg_bar = adapter->regs;
+ ena_dev->dmadev = adapter->pdev;
+
+ adapter->id_number = adapters_found;
+
+ snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
+ adapter->id_number);
+
+ /* device specific initialization routine */
+ rc = ena_device_init(ena_dev, &get_feat_ctx);
+ if (rc) {
+ PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
+ return -1;
+ }
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ if (get_feat_ctx.max_queues.max_llq_num == 0) {
+ PMD_INIT_LOG(ERR,
+ "Trying to use LLQ but llq_num is 0.\n"
+ "Falling back to regular queues.\n");
+ ena_dev->tx_mem_queue_type =
+ ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ adapter->num_queues =
+ get_feat_ctx.max_queues.max_sq_num;
+ } else {
+ adapter->num_queues =
+ get_feat_ctx.max_queues.max_llq_num;
+ }
+ } else {
+ adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
+ }
+
+ queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
+ if ((queue_size <= 0) || (adapter->num_queues <= 0))
+ return -EFAULT;
+
+ adapter->tx_ring_size = queue_size;
+ adapter->rx_ring_size = queue_size;
+
+ /* prepare ring structures */
+ ena_init_rings(adapter);
+
+ /* Set max MTU for this device */
+ adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
+
+ /* Copy MAC address and point DPDK to it */
+ eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
+ ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
+ (struct ether_addr *)adapter->mac_addr);
+
+ adapter->drv_stats = rte_zmalloc("adapter stats",
+ sizeof(*adapter->drv_stats),
+ RTE_CACHE_LINE_SIZE);
+ if (!adapter->drv_stats) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
+ return -ENOMEM;
+ }
+
+ adapters_found++;
+ adapter->state = ENA_ADAPTER_STATE_INIT;
+
+ return 0;
+}
+
+static int ena_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
+ adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
+ PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n",
+ adapter->state);
+ return -1;
+ }
+
+ switch (adapter->state) {
+ case ENA_ADAPTER_STATE_INIT:
+ case ENA_ADAPTER_STATE_STOPPED:
+ adapter->state = ENA_ADAPTER_STATE_CONFIG;
+ break;
+ case ENA_ADAPTER_STATE_CONFIG:
+ RTE_LOG(WARNING, PMD,
+ "Invalid driver state while trying to configure the device\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void ena_init_rings(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ struct ena_ring *ring = &adapter->tx_ring[i];
+
+ ring->configured = 0;
+ ring->type = ENA_RING_TYPE_TX;
+ ring->adapter = adapter;
+ ring->id = i;
+ ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
+ ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
+ }
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ struct ena_ring *ring = &adapter->rx_ring[i];
+
+ ring->configured = 0;
+ ring->type = ENA_RING_TYPE_RX;
+ ring->adapter = adapter;
+ ring->id = i;
+ }
+}
+
+static void ena_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ena_adapter *adapter;
+ struct ena_com_dev *ena_dev;
+ struct ena_com_dev_get_features_ctx feat;
+ uint32_t rx_feat = 0, tx_feat = 0;
+ int rc = 0;
+
+ ena_assert_msg(dev->data != NULL, "Uninitialized device");
+ ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+
+ ena_dev = &adapter->ena_dev;
+ ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+
+ dev_info->speed_capa =
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G;
+
+ /* Get supported features from HW */
+ rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
+ if (unlikely(rc)) {
+ RTE_LOG(ERR, PMD,
+ "Cannot get attributes for ena device, rc: %d\n", rc);
+ return;
+ }
+
+ /* Set Tx & Rx features available for device */
+ if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+ tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ if (feat.offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+ tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (feat.offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+ rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ /* Inform framework about available features */
+ dev_info->rx_offload_capa = rx_feat;
+ dev_info->tx_offload_capa = tx_feat;
+
+ dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
+ dev_info->max_rx_pktlen = adapter->max_mtu;
+ dev_info->max_mac_addrs = 1;
+
+ dev_info->max_rx_queues = adapter->num_queues;
+ dev_info->max_tx_queues = adapter->num_queues;
+ dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
+}
+
+static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
+ unsigned int ring_size = rx_ring->ring_size;
+ unsigned int ring_mask = ring_size - 1;
+ uint16_t next_to_clean = rx_ring->next_to_clean;
+ int desc_in_use = 0;
+ unsigned int recv_idx = 0;
+ struct rte_mbuf *mbuf = NULL;
+ struct rte_mbuf *mbuf_head = NULL;
+ struct rte_mbuf *mbuf_prev = NULL;
+ struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
+ unsigned int completed;
+
+ struct ena_com_rx_ctx ena_rx_ctx;
+ int rc = 0;
+
+ /* Check adapter state */
+ if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
+ RTE_LOG(ALERT, PMD,
+ "Trying to receive pkts while device is NOT running\n");
+ return 0;
+ }
+
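+ /* The number of descriptors currently handed to the device bounds
+ * how many packets this burst can complete.
+ */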
+ desc_in_use = ENA_CIRC_COUNT(rx_ring->next_to_use,
+ next_to_clean, ring_size);
+ if (unlikely(nb_pkts > desc_in_use))
+ nb_pkts = desc_in_use;
+
+ for (completed = 0; completed < nb_pkts; completed++) {
+ int segments = 0;
+
+ ena_rx_ctx.max_bufs = rx_ring->ring_size;
+ ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
+ ena_rx_ctx.descs = 0;
+ /* retrieve the Rx context (descriptors) of one packet */
+ rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
+ rx_ring->ena_com_io_sq,
+ &ena_rx_ctx);
+ if (unlikely(rc)) {
+ RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+ return 0;
+ }
+
+ if (unlikely(ena_rx_ctx.descs == 0))
+ break;
+
+ while (segments < ena_rx_ctx.descs) {
+ mbuf = rx_buff_info[next_to_clean & ring_mask];
+ mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->refcnt = 1;
+ mbuf->next = NULL;
+ if (segments == 0) {
+ mbuf->nb_segs = ena_rx_ctx.descs;
+ mbuf->port = rx_ring->port_id;
+ mbuf->pkt_len = 0;
+ mbuf_head = mbuf;
+ } else {
+ /* for multi-segment pkts create mbuf chain */
+ mbuf_prev->next = mbuf;
+ }
+ mbuf_head->pkt_len += mbuf->data_len;
+
+ mbuf_prev = mbuf;
+ segments++;
+ next_to_clean =
+ ENA_RX_RING_IDX_NEXT(next_to_clean, ring_size);
+ }
+
+ /* fill mbuf attributes if any */
+ ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
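+ /* the ring id is reported in place of a hardware RSS hash */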
+ mbuf_head->hash.rss = (uint32_t)rx_ring->id;
+
+ /* pass the head mbuf to the DPDK application */
+ rx_pkts[recv_idx] = mbuf_head;
+ recv_idx++;
+ }
+
+ /* Refill in bursts to save doorbells and memory barriers and to keep a constant refill interval */
+ if (ring_size - desc_in_use - 1 > ENA_RING_DESCS_RATIO(ring_size))
+ ena_populate_rx_queue(rx_ring, ring_size - desc_in_use - 1);
+
+ rx_ring->next_to_clean = next_to_clean & ring_mask;
+
+ return recv_idx;
+}
+
+static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+ unsigned int next_to_use = tx_ring->next_to_use;
+ struct rte_mbuf *mbuf;
+ unsigned int ring_size = tx_ring->ring_size;
+ unsigned int ring_mask = ring_size - 1;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_buf *ebuf;
+ uint16_t rc, req_id, total_tx_descs = 0;
+ int sent_idx = 0;
+ int nb_hw_desc;
+
+ /* Check adapter state */
+ if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
+ RTE_LOG(ALERT, PMD,
+ "Trying to xmit pkts while device is NOT running\n");
+ return 0;
+ }
+
+ for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
+ mbuf = tx_pkts[sent_idx];
+
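+ /* Take a free request id; it is returned to the pool when the
+ * completion for this packet arrives, possibly out of order.
+ */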
+ req_id = tx_ring->empty_tx_reqs[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->mbuf = mbuf;
+ tx_info->num_of_bufs = 0;
+ ebuf = tx_info->bufs;
+
+ /* Prepare TX context */
+ memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
+ memset(&ena_tx_ctx.ena_meta, 0x0,
+ sizeof(struct ena_com_tx_meta));
+ ena_tx_ctx.ena_bufs = ebuf;
+ ena_tx_ctx.req_id = req_id;
+ if (tx_ring->tx_mem_queue_type ==
+ ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* prepare the push buffer with the
+ * virtual address of the data
+ */
+ ena_tx_ctx.header_len =
+ RTE_MIN(mbuf->data_len,
+ tx_ring->tx_max_header_size);
+ ena_tx_ctx.push_header =
+ (void *)((char *)mbuf->buf_addr +
+ mbuf->data_off);
+ } /* there's no else as we take advantage of memset zeroing */
+
+ /* Set TX offloads flags, if applicable */
+ ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);
+
+ if (unlikely(mbuf->ol_flags &
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
+ rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
+
+ rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);
+
+ /* Process the first segment, taking the
+ * pushed header into account
+ */
+ if (mbuf->data_len > ena_tx_ctx.header_len) {
+ ebuf->paddr = mbuf->buf_physaddr +
+ mbuf->data_off +
+ ena_tx_ctx.header_len;
+ ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
+ ebuf++;
+ tx_info->num_of_bufs++;
+ }
+
+ while ((mbuf = mbuf->next) != NULL) {
+ ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off;
+ ebuf->len = mbuf->data_len;
+ ebuf++;
+ tx_info->num_of_bufs++;
+ }
+
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+
+ /* Write data to device */
+ rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
+ &ena_tx_ctx, &nb_hw_desc);
+ if (unlikely(rc))
+ break;
+
+ tx_info->tx_descs = nb_hw_desc;
+
+ next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size);
+ }
+
+ /* Let HW do its best :-) */
+ rte_wmb();
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+
+ /* Clean up completed packets */
+ while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
+ /* Get Tx info & store how many descs were processed */
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ total_tx_descs += tx_info->tx_descs;
+
+ /* Free whole mbuf chain */
+ mbuf = tx_info->mbuf;
+ rte_pktmbuf_free(mbuf);
+
+ /* Return the request id to the ring for reuse */
+ tx_ring->empty_tx_reqs[tx_ring->next_to_clean] = req_id;
+ tx_ring->next_to_clean =
+ ENA_TX_RING_IDX_NEXT(tx_ring->next_to_clean,
+ tx_ring->ring_size);
+
+ /* If too many descs to clean, leave it for another run */
+ if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
+ break;
+ }
+
+ /* acknowledge completion of sent packets */
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
+ tx_ring->next_to_use = next_to_use;
+ return sent_idx;
+}
+
+static struct eth_driver rte_ena_pmd = {
+ {
+ .name = "rte_ena_pmd",
+ .id_table = pci_id_ena_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .eth_dev_init = eth_ena_dev_init,
+ .dev_private_size = sizeof(struct ena_adapter),
+};
+
+static int
+rte_ena_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ rte_eth_driver_register(&rte_ena_pmd);
+ return 0;
+}
+
+struct rte_driver ena_pmd_drv = {
+ .name = "ena_driver",
+ .type = PMD_PDEV,
+ .init = rte_ena_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(ena_pmd_drv);
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
new file mode 100644
index 00000000..ba6f01e6
--- /dev/null
+++ b/drivers/net/ena/ena_ethdev.h
@@ -0,0 +1,160 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_ETHDEV_H_
+#define _ENA_ETHDEV_H_
+
+#include <rte_pci.h>
+
+#include "ena_com.h"
+
+#define ENA_REGS_BAR 0
+#define ENA_MEM_BAR 2
+
+#define ENA_MAX_NUM_QUEUES 128
+
+#define ENA_DEFAULT_TX_SW_DESCS (1024)
+#define ENA_DEFAULT_TX_HW_DESCS (1024)
+#define ENA_DEFAULT_RING_SIZE (1024)
+
+#define ENA_MIN_FRAME_LEN 64
+
+#define ENA_NAME_MAX_LEN 20
+#define ENA_IRQNAME_SIZE 40
+
+#define ENA_PKT_MAX_BUFS 17
+
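+/* Ring index helpers; the masking below assumes power-of-two ring sizes. */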
+#define ENA_CIRC_COUNT(head, tail, size) \
+ (((uint16_t)((uint16_t)(head) - (uint16_t)(tail))) & ((size) - 1))
+
+#define ENA_CIRC_INC(index, step, size) \
+ ((uint16_t)(index) + (uint16_t)(step))
+#define ENA_CIRC_INC_WRAP(index, step, size) \
+ (((uint16_t)(index) + (uint16_t)(step)) & ((size) - 1))
+
+#define ENA_TX_RING_IDX_NEXT(idx, ring_size) \
+ ENA_CIRC_INC_WRAP(idx, 1, ring_size)
+#define ENA_RX_RING_IDX_NEXT(idx, ring_size) \
+ ENA_CIRC_INC_WRAP(idx, 1, ring_size)
+
+struct ena_adapter;
+
+enum ena_ring_type {
+ ENA_RING_TYPE_RX = 1,
+ ENA_RING_TYPE_TX = 2,
+};
+
+struct ena_tx_buffer {
+ struct rte_mbuf *mbuf;
+ unsigned int tx_descs;
+ unsigned int num_of_bufs;
+ struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
+};
+
+struct ena_ring {
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ enum ena_ring_type type;
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+ /* Holds free request ids used for out-of-order TX completions */
+ uint16_t *empty_tx_reqs;
+ union {
+ struct ena_tx_buffer *tx_buffer_info; /* context of TX packet */
+ struct rte_mbuf **rx_buffer_info; /* context of RX packet */
+ };
+ unsigned int ring_size; /* number of entries in tx/rx_buffer_info */
+
+ struct ena_com_io_cq *ena_com_io_cq;
+ struct ena_com_io_sq *ena_com_io_sq;
+
+ struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
+ __rte_cache_aligned;
+
+ struct rte_mempool *mb_pool;
+ unsigned int port_id;
+ unsigned int id;
+ /* Max length PMD can push to device for LLQ */
+ uint8_t tx_max_header_size;
+ int configured;
+ struct ena_adapter *adapter;
+} __rte_cache_aligned;
+
+enum ena_adapter_state {
+ ENA_ADAPTER_STATE_FREE = 0,
+ ENA_ADAPTER_STATE_INIT = 1,
+ ENA_ADAPTER_STATE_RUNNING = 2,
+ ENA_ADAPTER_STATE_STOPPED = 3,
+ ENA_ADAPTER_STATE_CONFIG = 4,
+};
+
+struct ena_driver_stats {
+ rte_atomic64_t ierrors;
+ rte_atomic64_t oerrors;
+ rte_atomic64_t imcasts;
+ rte_atomic64_t rx_nombuf;
+};
+
+/* board specific private data structure */
+struct ena_adapter {
+ /* OS defined structs */
+ struct rte_pci_device *pdev;
+ struct rte_eth_dev_data *rte_eth_dev_data;
+ struct rte_eth_dev *rte_dev;
+
+ struct ena_com_dev ena_dev __rte_cache_aligned;
+
+ /* TX */
+ struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
+ int tx_ring_size;
+
+ /* RX */
+ struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
+ int rx_ring_size;
+
+ u16 num_queues;
+ u16 max_mtu;
+
+ int id_number;
+ char name[ENA_NAME_MAX_LEN];
+ u8 mac_addr[ETHER_ADDR_LEN];
+
+ void *regs;
+ void *dev_mem_base;
+
+ struct ena_driver_stats *drv_stats;
+ enum ena_adapter_state state;
+
+};
+
+#endif /* _ENA_ETHDEV_H_ */
diff --git a/drivers/net/ena/ena_logs.h b/drivers/net/ena/ena_logs.h
new file mode 100644
index 00000000..c6c8a41b
--- /dev/null
+++ b/drivers/net/ena/ena_logs.h
@@ -0,0 +1,70 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_LOGS_H_
+#define _ENA_LOGS_H_
+
+#define RTE_LOGTYPE_ENA RTE_LOGTYPE_USER1
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+#ifdef RTE_LIBRTE_ENA_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_ENA_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_ENA_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _ENA_LOGS_H_ */
diff --git a/drivers/net/ena/ena_platform.h b/drivers/net/ena/ena_platform.h
new file mode 100644
index 00000000..0df82d6f
--- /dev/null
+++ b/drivers/net/ena/ena_platform.h
@@ -0,0 +1,59 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef __ENA_PLATFORM_H__
+#define __ENA_PLATFORM_H__
+
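+/* The ENA device is little-endian; this PMD assumes a little-endian host as
+ * well, so the byte-order helpers below are no-ops.
+ */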
+#define swap16_to_le(x) (x)
+
+#define swap32_to_le(x) (x)
+
+#define swap64_to_le(x) (x)
+
+#define swap16_from_le(x) (x)
+
+#define swap32_from_le(x) (x)
+
+#define swap64_from_le(x) (x)
+
+#define ena_assert_msg(cond, msg) \
+ do { \
+ if (unlikely(!(cond))) { \
+ RTE_LOG(ERR, ENA, \
+ "Assert failed on %s:%s:%d: ", \
+ __FILE__, __func__, __LINE__); \
+ rte_panic(msg); \
+ } \
+ } while (0)
+
+#endif /* __ENA_PLATFORM_H__ */
diff --git a/drivers/net/ena/rte_pmd_ena_version.map b/drivers/net/ena/rte_pmd_ena_version.map
new file mode 100644
index 00000000..349c6e1c
--- /dev/null
+++ b/drivers/net/ena/rte_pmd_ena_version.map
@@ -0,0 +1,4 @@
+DPDK_16.04 {
+
+ local: *;
+};