Diffstat (limited to 'drivers/net/ena')
-rw-r--r--  drivers/net/ena/Makefile                          |    4
-rw-r--r--  drivers/net/ena/base/ena_com.c                    |  711
-rw-r--r--  drivers/net/ena/base/ena_com.h                    |  112
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_admin_defs.h    | 1164
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_common_defs.h   |    8
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_eth_io_defs.h   |  758
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_gen_info.h      |    4
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_includes.h      |    2
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_regs_defs.h     |   36
-rw-r--r--  drivers/net/ena/base/ena_eth_com.c                |   78
-rw-r--r--  drivers/net/ena/base/ena_eth_com.h                |   10
-rw-r--r--  drivers/net/ena/base/ena_plat.h                   |    4
-rw-r--r--  drivers/net/ena/base/ena_plat_dpdk.h              |   79
-rw-r--r--  drivers/net/ena/ena_ethdev.c                      |  800
-rw-r--r--  drivers/net/ena/ena_ethdev.h                      |   32
-rw-r--r--  drivers/net/ena/meson.build                       |   11
16 files changed, 1743 insertions(+), 2070 deletions(-)
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
index f9bfe053..ff9ce315 100644
--- a/drivers/net/ena/Makefile
+++ b/drivers/net/ena/Makefile
@@ -43,6 +43,9 @@ INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base
EXPORT_MAP := rte_pmd_ena_version.map
LIBABIVER := 1
+# rte_fbarray is not yet part of stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
VPATH += $(SRCDIR)/base
#
# all source are stored in SRCS-y
@@ -55,5 +58,6 @@ CFLAGS += $(INCLUDES)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_timer
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 38a05877..4abf1a28 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -37,11 +37,19 @@
/*****************************************************************************/
/* Timeout in micro-sec */
-#define ADMIN_CMD_TIMEOUT_US (1000000)
+#define ADMIN_CMD_TIMEOUT_US (3000000)
-#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
+#ifdef ENA_EXTENDED_STATS
+
+#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#endif /* ENA_EXTENDED_STATS */
+
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
| (ENA_COMMON_SPEC_VERSION_MINOR))
@@ -62,7 +70,9 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
-static int ena_alloc_cnt;
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
+#define ENA_POLL_MS 5
/*****************************************************************************/
/*****************************************************************************/
@@ -86,6 +96,11 @@ struct ena_comp_ctx {
bool occupied;
};
+struct ena_com_stats_ctx {
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+};
+
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
struct ena_common_mem_addr *ena_addr,
dma_addr_t addr)
@@ -95,50 +110,49 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return ENA_COM_INVAL;
}
- ena_addr->mem_addr_low = (u32)addr;
- ena_addr->mem_addr_high =
- ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32);
+ ena_addr->mem_addr_low = lower_32_bits(addr);
+ ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
return 0;
}
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
- ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
- ADMIN_SQ_SIZE(queue->q_depth),
- queue->sq.entries,
- queue->sq.dma_addr,
- queue->sq.mem_handle);
+ struct ena_com_admin_sq *sq = &queue->sq;
+ u16 size = ADMIN_SQ_SIZE(queue->q_depth);
- if (!queue->sq.entries) {
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
+ sq->mem_handle);
+
+ if (!sq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- queue->sq.head = 0;
- queue->sq.tail = 0;
- queue->sq.phase = 1;
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 1;
- queue->sq.db_addr = NULL;
+ sq->db_addr = NULL;
return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
- ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
- ADMIN_CQ_SIZE(queue->q_depth),
- queue->cq.entries,
- queue->cq.dma_addr,
- queue->cq.mem_handle);
+ struct ena_com_admin_cq *cq = &queue->cq;
+ u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
+ cq->mem_handle);
- if (!queue->cq.entries) {
+ if (!cq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- queue->cq.head = 0;
- queue->cq.phase = 1;
+ cq->head = 0;
+ cq->phase = 1;
return 0;
}
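
Note on the ena_com_mem_addr_set() change above: the open-coded GENMASK_ULL
arithmetic is replaced by the lower_32_bits()/upper_32_bits() helpers, and the
high half is narrowed to u16, so a descriptor now carries at most a 48-bit DMA
address. A minimal standalone sketch of the split, assuming the usual
Linux-style semantics for the two helpers (their definitions are not part of
this patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;

    /* Assumed semantics: low/high 32-bit halves of a 64-bit value. */
    #define lower_32_bits(n) ((uint32_t)(n))
    #define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

    int main(void)
    {
        dma_addr_t addr = 0x0000123456789abcULL;

        uint32_t low  = lower_32_bits(addr);           /* 0x56789abc */
        uint16_t high = (uint16_t)upper_32_bits(addr); /* 0x1234     */

        printf("low=0x%08x high=0x%04x\n", low, high);
        return 0;
    }
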
@@ -146,44 +160,44 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
struct ena_aenq_handlers *aenq_handlers)
{
+ struct ena_com_aenq *aenq = &dev->aenq;
u32 addr_low, addr_high, aenq_caps;
+ u16 size;
dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
- ENA_MEM_ALLOC_COHERENT(dev->dmadev,
- ADMIN_AENQ_SIZE(dev->aenq.q_depth),
- dev->aenq.entries,
- dev->aenq.dma_addr,
- dev->aenq.mem_handle);
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+ ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
+ aenq->entries,
+ aenq->dma_addr,
+ aenq->mem_handle);
- if (!dev->aenq.entries) {
+ if (!aenq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- dev->aenq.head = dev->aenq.q_depth;
- dev->aenq.phase = 1;
+ aenq->head = aenq->q_depth;
+ aenq->phase = 1;
- addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr);
- addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr);
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
aenq_caps = 0;
aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
- ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_CAPS_OFF);
-
- if (unlikely(!aenq_handlers))
+ if (unlikely(!aenq_handlers)) {
ena_trc_err("aenq handlers pointer is NULL\n");
+ return ENA_COM_INVAL;
+ }
- dev->aenq.aenq_handlers = aenq_handlers;
+ aenq->aenq_handlers = aenq_handlers;
return 0;
}
@@ -217,12 +231,11 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
return &queue->comp_ctx[command_id];
}
-static struct ena_comp_ctx *
-__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size_in_bytes,
- struct ena_admin_acq_entry *comp,
- size_t comp_size_in_bytes)
+static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
{
struct ena_comp_ctx *comp_ctx;
u16 tail_masked, cmd_id;
@@ -234,12 +247,9 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
- admin_queue->sq.tail,
- admin_queue->sq.head,
- admin_queue->q_depth);
+ ena_trc_dbg("admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(ENA_COM_NO_SPACE);
}
@@ -253,6 +263,8 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+ if (unlikely(!comp_ctx))
+ return ERR_PTR(ENA_COM_INVAL);
comp_ctx->status = ENA_CMD_SUBMITTED;
comp_ctx->comp_size = (u32)comp_size_in_bytes;
@@ -272,7 +284,8 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
admin_queue->sq.phase = !admin_queue->sq.phase;
- ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr);
+ ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
+ admin_queue->sq.db_addr);
return comp_ctx;
}
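
Note on the queue-full check above: occupancy is now read from the atomic
outstanding_cmds counter rather than computed as sq.tail - sq.head, so
commands whose completion context is still held count against the queue
depth. A tiny sketch of the idea using C11 atomics (names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    #define Q_DEPTH 32

    /* Incremented on submit, decremented when the caller releases the
     * completion context. */
    static atomic_int outstanding_cmds;

    /* Full when as many commands are in flight as the queue is deep,
     * independent of where tail/head happen to point. */
    static int queue_has_room(void)
    {
        return atomic_load(&outstanding_cmds) < Q_DEPTH;
    }

    int main(void)
    {
        atomic_store(&outstanding_cmds, Q_DEPTH);
        printf("room: %d\n", queue_has_room()); /* prints: room: 0 */
        return 0;
    }
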
@@ -298,12 +311,11 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
return 0;
}
-static struct ena_comp_ctx *
-ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size_in_bytes,
- struct ena_admin_acq_entry *comp,
- size_t comp_size_in_bytes)
+static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
{
unsigned long flags = 0;
struct ena_comp_ctx *comp_ctx;
@@ -317,7 +329,7 @@ ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
cmd_size_in_bytes,
comp,
comp_size_in_bytes);
- if (unlikely(IS_ERR(comp_ctx)))
+ if (IS_ERR(comp_ctx))
admin_queue->running_state = false;
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -331,9 +343,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size_t size;
int dev_node = 0;
- ENA_TOUCH(ctx);
-
- memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
@@ -347,23 +357,26 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size,
io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle,
ctx->numa_node,
dev_node);
- if (!io_sq->desc_addr.virt_addr)
+ if (!io_sq->desc_addr.virt_addr) {
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
size,
io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle);
+ }
} else {
ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
size,
io_sq->desc_addr.virt_addr,
ctx->numa_node,
dev_node);
- if (!io_sq->desc_addr.virt_addr)
+ if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ }
}
if (!io_sq->desc_addr.virt_addr) {
@@ -385,8 +398,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size_t size;
int prev_node = 0;
- ENA_TOUCH(ctx);
- memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
/* Use the basic completion descriptor for Rx */
io_cq->cdesc_entry_size_in_bytes =
@@ -397,17 +409,19 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
- size,
- io_cq->cdesc_addr.virt_addr,
- io_cq->cdesc_addr.phys_addr,
- ctx->numa_node,
- prev_node);
- if (!io_cq->cdesc_addr.virt_addr)
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle,
+ ctx->numa_node,
+ prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
size,
io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr,
io_cq->cdesc_addr.mem_handle);
+ }
if (!io_cq->cdesc_addr.virt_addr) {
ena_trc_err("memory allocation failed");
@@ -420,9 +434,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
return 0;
}
-static void
-ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_acq_entry *cqe)
+static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_acq_entry *cqe)
{
struct ena_comp_ctx *comp_ctx;
u16 cmd_id;
@@ -447,8 +460,7 @@ ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
-static void
-ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
struct ena_admin_acq_entry *cqe = NULL;
u16 comp_num = 0;
@@ -499,7 +511,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return ENA_COM_NO_MEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE:
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
case ENA_ADMIN_BAD_OPCODE:
case ENA_ADMIN_MALFORMED_REQUEST:
case ENA_ADMIN_ILLEGAL_PARAMETER:
@@ -510,20 +522,24 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
return 0;
}
-static int
-ena_com_wait_and_process_admin_cq_polling(
- struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
unsigned long flags = 0;
- u64 start_time;
+ unsigned long timeout;
int ret;
- start_time = ENA_GET_SYSTEM_USECS();
+ timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
+
+ while (1) {
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ if (comp_ctx->status != ENA_CMD_SUBMITTED)
+ break;
- while (comp_ctx->status == ENA_CMD_SUBMITTED) {
- if ((ENA_GET_SYSTEM_USECS() - start_time) >
- ADMIN_CMD_TIMEOUT_US) {
+ if (ENA_TIME_EXPIRE(timeout)) {
ena_trc_err("Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
@@ -535,9 +551,7 @@ ena_com_wait_and_process_admin_cq_polling(
goto err;
}
- ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ENA_MSLEEP(ENA_POLL_MS);
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -549,8 +563,8 @@ ena_com_wait_and_process_admin_cq_polling(
goto err;
}
- ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED,
- "Invalid comp status %d\n", comp_ctx->status);
+ ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
+ "Invalid comp status %d\n", comp_ctx->status);
ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
@@ -558,16 +572,14 @@ err:
return ret;
}
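
Note on the reworked polling loop above: completions are reaped before the
expiry test (so a command that finishes right at the deadline is not
mis-reported as a timeout), and the busy loop on microsecond timestamps is
replaced with an ENA_POLL_MS (5 ms) sleep per iteration against a precomputed
deadline. A self-contained sketch of the same shape, with hypothetical
stand-ins for the ENA_* platform macros:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define POLL_MS 5 /* mirrors ENA_POLL_MS */

    static long long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* handle_and_check() stands in for "take q_lock, handle admin
     * completions, drop q_lock, report whether our command left the
     * SUBMITTED state". Returns 0 on completion, -1 on timeout. */
    static int poll_until(bool (*handle_and_check)(void *), void *ctx,
                          long long timeout_ms)
    {
        long long deadline = now_ms() + timeout_ms;

        for (;;) {
            if (handle_and_check(ctx)) /* reap before the expiry test */
                return 0;
            if (now_ms() > deadline)
                return -1;
            struct timespec ts = {0, POLL_MS * 1000000L};
            nanosleep(&ts, NULL); /* sleep instead of spinning */
        }
    }

    static bool done_after_two(void *ctx) { return --*(int *)ctx <= 0; }

    int main(void)
    {
        int calls = 2;
        printf("rc=%d\n", poll_until(done_after_two, &calls, 100));
        return 0;
    }
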
-static int
-ena_com_wait_and_process_admin_cq_interrupts(
- struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
unsigned long flags = 0;
- int ret = 0;
+ int ret;
ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
- ADMIN_CMD_TIMEOUT_US);
+ admin_queue->completion_timeout);
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -607,16 +619,18 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
- u32 mmio_read_reg, ret;
+ u32 mmio_read_reg, ret, i;
unsigned long flags = 0;
- int i;
+ u32 timeout = mmio_read->reg_read_to;
ENA_MIGHT_SLEEP();
+ if (timeout == 0)
+ timeout = ENA_REG_READ_TIMEOUT;
+
/* If readless is disabled, perform regular read */
if (!mmio_read->readless_supported)
- return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar +
- offset);
+ return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
mmio_read->seq_num++;
@@ -632,17 +646,16 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
*/
wmb();
- ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_REG_READ_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
- for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
if (read_resp->req_id == mmio_read->seq_num)
break;
ENA_UDELAY(1);
}
- if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+ if (unlikely(i == timeout)) {
ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num,
offset,
@@ -653,7 +666,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (read_resp->reg_off != offset) {
- ena_trc_err("reading failed for wrong offset value");
+ ena_trc_err("Read failure: wrong offset provided");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -671,9 +684,8 @@ err:
* It is expected that the IRQ called ena_com_handle_admin_completion
* to mark the completions.
*/
-static int
-ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
if (admin_queue->polling)
return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
@@ -692,7 +704,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
direction = ENA_ADMIN_SQ_DIRECTION_TX;
@@ -706,12 +718,11 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
destroy_cmd.sq.sq_idx = io_sq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&destroy_cmd,
- sizeof(destroy_cmd),
- (struct ena_admin_acq_entry *)&destroy_resp,
- sizeof(destroy_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
ena_trc_err("failed to destroy io sq error: %d\n", ret);
@@ -747,18 +758,20 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle);
else
- ENA_MEM_FREE(ena_dev->dmadev,
- io_sq->desc_addr.virt_addr);
+ ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
io_sq->desc_addr.virt_addr = NULL;
}
}
-static int wait_for_reset_state(struct ena_com_dev *ena_dev,
- u32 timeout, u16 exp_state)
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ u16 exp_state)
{
u32 val, i;
+ /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
+ timeout = (timeout * 100) / ENA_POLL_MS;
+
for (i = 0; i < timeout; i++) {
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
@@ -771,16 +784,14 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev,
exp_state)
return 0;
- /* The resolution of the timeout is 100ms */
- ENA_MSLEEP(100);
+ ENA_MSLEEP(ENA_POLL_MS);
}
return ENA_COM_TIMER_EXPIRED;
}
-static bool
-ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
- enum ena_admin_aq_feature_id feature_id)
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
{
u32 feature_mask = 1 << feature_id;
@@ -802,14 +813,9 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_cmd get_cmd;
int ret;
- if (!ena_dev) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
-
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- ena_trc_info("Feature %d isn't supported\n", feature_id);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n", feature_id);
+ return ENA_COM_UNSUPPORTED;
}
memset(&get_cmd, 0x0, sizeof(get_cmd));
@@ -945,10 +951,10 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
sizeof(struct ena_admin_rss_ind_table_entry);
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- tbl_size,
- rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr,
- rss->rss_ind_tbl_mem_handle);
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
@@ -1005,7 +1011,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
@@ -1041,12 +1047,11 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
}
}
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&create_cmd,
- sizeof(create_cmd),
- (struct ena_admin_acq_entry *)&cmd_completion,
- sizeof(cmd_completion));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
if (unlikely(ret)) {
ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
return ret;
@@ -1133,9 +1138,8 @@ static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
return 0;
}
-static void
-ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
- u16 intr_delay_resolution)
+static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ u16 intr_delay_resolution)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
unsigned int i;
@@ -1165,13 +1169,18 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
size_t comp_size)
{
struct ena_comp_ctx *comp_ctx;
- int ret = 0;
+ int ret;
comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
comp, comp_size);
- if (unlikely(IS_ERR(comp_ctx))) {
- ena_trc_err("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
+ if (IS_ERR(comp_ctx)) {
+ if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
+ ena_trc_dbg("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+ else
+ ena_trc_err("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+
return PTR_ERR(comp_ctx);
}
@@ -1195,7 +1204,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_create_cq_resp_desc cmd_completion;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
@@ -1215,12 +1224,11 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
return ret;
}
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&create_cmd,
- sizeof(create_cmd),
- (struct ena_admin_acq_entry *)&cmd_completion,
- sizeof(cmd_completion));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
if (unlikely(ret)) {
ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
return ret;
@@ -1290,7 +1298,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
- ENA_MSLEEP(20);
+ ENA_MSLEEP(ENA_POLL_MS);
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
}
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -1304,17 +1312,16 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
destroy_cmd.cq_idx = io_cq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&destroy_cmd,
- sizeof(destroy_cmd),
- (struct ena_admin_acq_entry *)&destroy_resp,
- sizeof(destroy_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
@@ -1341,13 +1348,12 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
u16 depth = ena_dev->aenq.q_depth;
- ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n");
+ ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
/* Init head_db to mark that all entries in the queue
* are initially available
*/
- ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AENQ_HEAD_DB_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
@@ -1356,12 +1362,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
struct ena_admin_get_feat_resp get_resp;
- int ret = 0;
-
- if (unlikely(!ena_dev)) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
if (ret) {
@@ -1373,7 +1374,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
get_resp.u.aenq.supported_groups,
groups_flag);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1476,41 +1477,42 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
+ u16 size;
- if (!admin_queue)
- return;
-
+ ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
if (admin_queue->comp_ctx)
ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
admin_queue->comp_ctx = NULL;
-
- if (admin_queue->sq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_SQ_SIZE(admin_queue->q_depth),
- admin_queue->sq.entries,
- admin_queue->sq.dma_addr,
- admin_queue->sq.mem_handle);
- admin_queue->sq.entries = NULL;
-
- if (admin_queue->cq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_CQ_SIZE(admin_queue->q_depth),
- admin_queue->cq.entries,
- admin_queue->cq.dma_addr,
- admin_queue->cq.mem_handle);
- admin_queue->cq.entries = NULL;
-
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
+ sq->dma_addr, sq->mem_handle);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
+ cq->dma_addr, cq->mem_handle);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
if (ena_dev->aenq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth),
- ena_dev->aenq.entries,
- ena_dev->aenq.dma_addr,
- ena_dev->aenq.mem_handle);
- ena_dev->aenq.entries = NULL;
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
+ aenq->dma_addr, aenq->mem_handle);
+ aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+ ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
ena_dev->admin_queue.polling = polling;
}
@@ -1536,8 +1538,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
return 0;
}
-void
-ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1548,10 +1549,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
- ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_LO_OFF);
- ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
sizeof(*mmio_read->read_resp),
@@ -1570,10 +1569,8 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
@@ -1619,24 +1616,20 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
if (ret)
goto error;
- admin_queue->sq.db_addr = (u32 __iomem *)
- ((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF);
+ admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ ENA_REGS_AQ_DB_OFF);
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
aq_caps = 0;
aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
@@ -1650,10 +1643,8 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
- ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_CAPS_OFF);
- ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
if (ret)
goto error;
@@ -1672,7 +1663,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
{
struct ena_com_io_sq *io_sq;
struct ena_com_io_cq *io_cq;
- int ret = 0;
+ int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
@@ -1683,8 +1674,8 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
io_sq = &ena_dev->io_sq_queues[ctx->qid];
io_cq = &ena_dev->io_cq_queues[ctx->qid];
- memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
- memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
+ memset(io_sq, 0x0, sizeof(*io_sq));
+ memset(io_cq, 0x0, sizeof(*io_cq));
/* Init CQ */
io_cq->q_depth = ctx->queue_size;
@@ -1794,6 +1785,19 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
sizeof(get_resp.u.offload));
+ /* Driver hints isn't mandatory admin command. So in case the
+ * command isn't supported set driver hints to 0
+ */
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+
+ if (!rc)
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+ sizeof(get_resp.u.hw_hints));
+ else if (rc == ENA_COM_UNSUPPORTED)
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
return 0;
}
@@ -1826,6 +1830,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
ena_aenq_handler handler_cb;
+ unsigned long long timestamp;
u16 masked_head, processed = 0;
u8 phase;
@@ -1837,11 +1842,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* Go over all the events */
while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
phase) {
+ timestamp = (unsigned long long)aenq_common->timestamp_low |
+ ((unsigned long long)aenq_common->timestamp_high << 32);
+ ENA_TOUCH(timestamp); /* In case debug is disabled */
ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group,
aenq_common->syndrom,
- (unsigned long long)aenq_common->timestamp_low +
- ((u64)aenq_common->timestamp_high << 32));
+ timestamp);
/* Handle specific event*/
handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -1869,11 +1876,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_HEAD_DB_OFF);
+ ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
-int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason)
{
u32 stat, timeout, cap, reset_val;
int rc;
@@ -1901,8 +1908,9 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
/* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
- ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_DEV_CTL_OFF);
+ reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ ENA_REGS_DEV_CTL_RESET_REASON_MASK;
+ ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */
ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
@@ -1915,29 +1923,32 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
}
/* reset done */
- ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_DEV_CTL_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
ena_trc_err("Reset indication didn't turn off\n");
return rc;
}
+ timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ ena_dev->admin_queue.completion_timeout = timeout * 100000;
+ else
+ ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
return 0;
}
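
Note on the timeout plumbing above: the device's caps register carries an
admin-command timeout field in units of 100 ms; when non-zero it overrides the
3 s ADMIN_CMD_TIMEOUT_US default, and wait_for_reset_state() converts the same
100 ms resolution into ENA_POLL_MS-spaced iterations. The arithmetic, as a
standalone sketch (the cap_field value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define ENA_POLL_MS          5
    #define ADMIN_CMD_TIMEOUT_US 3000000 /* 3 s default */

    int main(void)
    {
        uint32_t cap_field = 7; /* hypothetical value from the caps register */

        /* Field resolution is 100 ms -> microseconds for the admin queue. */
        uint32_t completion_timeout_us =
            cap_field ? cap_field * 100000 : ADMIN_CMD_TIMEOUT_US;

        /* Same 100 ms resolution -> number of ENA_POLL_MS-spaced polls. */
        uint32_t reset_poll_iters = (cap_field * 100) / ENA_POLL_MS;

        printf("%u us, %u iterations\n",
               completion_timeout_us, reset_poll_iters);
        return 0;
    }
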
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_aq_get_stats_cmd *get_cmd,
- struct ena_admin_acq_get_stats_resp *get_resp,
+ struct ena_com_stats_ctx *ctx,
enum ena_admin_get_stats_type type)
{
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
struct ena_com_admin_queue *admin_queue;
- int ret = 0;
-
- if (!ena_dev) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
admin_queue = &ena_dev->admin_queue;
@@ -1945,12 +1956,11 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
get_cmd->aq_common_descriptor.flags = 0;
get_cmd->type = type;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
if (unlikely(ret))
ena_trc_err("Failed to get stats. error: %d\n", ret);
@@ -1961,78 +1971,28 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
struct ena_admin_basic_stats *stats)
{
- int ret = 0;
- struct ena_admin_aq_get_stats_cmd get_cmd;
- struct ena_admin_acq_get_stats_resp get_resp;
+ struct ena_com_stats_ctx ctx;
+ int ret;
- memset(&get_cmd, 0x0, sizeof(get_cmd));
- ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
- ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
if (likely(ret == 0))
- memcpy(stats, &get_resp.basic_stats,
- sizeof(get_resp.basic_stats));
+ memcpy(stats, &ctx.get_resp.basic_stats,
+ sizeof(ctx.get_resp.basic_stats));
return ret;
}
-int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
- u32 len)
-{
- int ret = 0;
- struct ena_admin_aq_get_stats_cmd get_cmd;
- struct ena_admin_acq_get_stats_resp get_resp;
- ena_mem_handle_t mem_handle = 0;
- void *virt_addr;
- dma_addr_t phys_addr;
-
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
- virt_addr, phys_addr, mem_handle);
- if (!virt_addr) {
- ret = ENA_COM_NO_MEM;
- goto done;
- }
- memset(&get_cmd, 0x0, sizeof(get_cmd));
- ret = ena_com_mem_addr_set(ena_dev,
- &get_cmd.u.control_buffer.address,
- phys_addr);
- if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
- return ret;
- }
- get_cmd.u.control_buffer.length = len;
-
- get_cmd.device_id = ena_dev->stats_func;
- get_cmd.queue_idx = ena_dev->stats_queue;
-
- ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
- if (ret < 0)
- goto free_ext_stats_mem;
-
- ret = snprintf(buff, len, "%s", (char *)virt_addr);
-
-free_ext_stats_mem:
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
- mem_handle);
-done:
- return ret;
-}
-
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
- int ret = 0;
-
- if (unlikely(!ena_dev)) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2049,11 +2009,10 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
- return ENA_COM_INVAL;
- }
- return 0;
+
+ return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
@@ -2066,7 +2025,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
if (unlikely(ret)) {
ena_trc_err("Failed to get offload capabilities %d\n", ret);
- return ENA_COM_INVAL;
+ return ret;
}
memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
@@ -2085,9 +2044,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_FUNCTION)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_FUNCTION);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return ENA_COM_UNSUPPORTED;
}
/* Validate hash function is supported */
@@ -2099,7 +2058,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
ena_trc_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2158,7 +2117,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
ena_trc_err("Flow hash function %d isn't supported\n", func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
switch (func) {
@@ -2207,7 +2166,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func;
+ rss->hash_func = get_resp.u.flow_hash_func.selected_func;
if (func)
*func = rss->hash_func;
@@ -2242,17 +2201,20 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
int ret;
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_INPUT)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_INPUT);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
+ return ENA_COM_UNSUPPORTED;
}
+ memset(&cmd, 0x0, sizeof(cmd));
+
cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
cmd.aq_common_descriptor.flags =
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
@@ -2268,20 +2230,17 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ena_trc_err("memory address set failed\n");
return ret;
}
- cmd.control_buffer.length =
- sizeof(struct ena_admin_feature_rss_hash_control);
+ cmd.control_buffer.length = sizeof(*hash_ctrl);
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set hash input. error: %d\n", ret);
- return ENA_COM_INVAL;
- }
- return 0;
+ return ret;
}
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
@@ -2293,7 +2252,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
int rc, i;
/* Get the supported hash input */
- rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
if (unlikely(rc))
return rc;
@@ -2322,7 +2281,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
@@ -2332,7 +2291,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
}
@@ -2340,7 +2299,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
/* In case of failure, restore the old hash ctrl */
if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
return rc;
}
@@ -2377,7 +2336,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* In case of failure, restore the old hash ctrl */
if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
return 0;
}
@@ -2404,14 +2363,13 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
- int ret = 0;
+ int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
- return ENA_COM_PERMISSION;
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ return ENA_COM_UNSUPPORTED;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
@@ -2446,12 +2404,10 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set indirect table. error: %d\n", ret);
- return ENA_COM_INVAL;
- }
- return 0;
+ return ret;
}
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
@@ -2538,17 +2494,18 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
- u32 debug_area_size) {
+ u32 debug_area_size)
+{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr,
- host_attr->debug_area_dma_handle);
- if (unlikely(!host_attr->debug_area_virt_addr)) {
- host_attr->debug_area_size = 0;
- return ENA_COM_NO_MEM;
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ if (unlikely(!host_attr->debug_area_virt_addr)) {
+ host_attr->debug_area_size = 0;
+ return ENA_COM_NO_MEM;
}
host_attr->debug_area_size = debug_area_size;
@@ -2590,6 +2547,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
+
int ret;
/* Host attribute config is called before ena_com_get_dev_attr_feat
@@ -2635,14 +2593,12 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
- return ena_com_check_supported_feature_id(
- ena_dev,
- ENA_ADMIN_INTERRUPT_MODERATION);
+ return ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_INTERRUPT_MODERATION);
}
-int
-ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
- u32 tx_coalesce_usecs)
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
{
if (!ena_dev->intr_delay_resolution) {
ena_trc_err("Illegal interrupt delay granularity value\n");
@@ -2655,9 +2611,8 @@ ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
return 0;
}
-int
-ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
- u32 rx_coalesce_usecs)
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
{
if (!ena_dev->intr_delay_resolution) {
ena_trc_err("Illegal interrupt delay granularity value\n");
@@ -2690,9 +2645,9 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
if (rc) {
- if (rc == ENA_COM_PERMISSION) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_INTERRUPT_MODERATION);
+ if (rc == ENA_COM_UNSUPPORTED) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
@@ -2719,8 +2674,7 @@ err:
return rc;
}
-void
-ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
@@ -2763,14 +2717,12 @@ ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
ENA_INTR_HIGHEST_BYTES;
}
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
return ena_dev->intr_moder_tx_interval;
}
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
@@ -2794,7 +2746,10 @@ void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
intr_moder_tbl[level].intr_moder_interval /=
ena_dev->intr_delay_resolution;
intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
- intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+
+ /* use hardcoded value until ethtool supports bytecount parameter */
+ if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
+ intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h
index e5345926..f58cd86a 100644
--- a/drivers/net/ena/base/ena_com.h
+++ b/drivers/net/ena/base/ena_com.h
@@ -35,15 +35,7 @@
#define ENA_COM
#include "ena_plat.h"
-#include "ena_common_defs.h"
-#include "ena_admin_defs.h"
-#include "ena_eth_io_defs.h"
-#include "ena_regs_defs.h"
-#if defined(__linux__) && !defined(__KERNEL__)
-#include <rte_lcore.h>
-#include <rte_spinlock.h>
-#define __iomem
-#endif
+#include "ena_includes.h"
#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need to queues for each IO (on for Tx and one for Rx) */
@@ -89,6 +81,11 @@
#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
+#define ENA_INTR_MODER_LEVEL_STRIDE 1
+#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
enum ena_intr_moder_level {
ENA_INTR_MODER_LOWEST = 0,
ENA_INTR_MODER_LOW,
@@ -120,8 +117,8 @@ struct ena_com_rx_buf_info {
};
struct ena_com_io_desc_addr {
- u8 __iomem *pbuf_dev_addr; /* LLQ address */
- u8 *virt_addr;
+ u8 __iomem *pbuf_dev_addr; /* LLQ address */
+ u8 *virt_addr;
dma_addr_t phys_addr;
ena_mem_handle_t mem_handle;
};
@@ -130,13 +127,12 @@ struct ena_com_tx_meta {
u16 mss;
u16 l3_hdr_len;
u16 l3_hdr_offset;
- u16 l3_outer_hdr_len; /* In words */
- u16 l3_outer_hdr_offset;
u16 l4_hdr_len; /* In words */
};
struct ena_com_io_cq {
struct ena_com_io_desc_addr cdesc_addr;
+ void *bus;
/* Interrupt unmask register */
u32 __iomem *unmask_reg;
@@ -174,6 +170,7 @@ struct ena_com_io_cq {
struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
+ void *bus;
u32 __iomem *db_addr;
u8 __iomem *header_addr;
@@ -228,8 +225,11 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue {
void *q_dmadev;
+ void *bus;
ena_spinlock_t q_lock; /* spinlock for the admin queue */
+
struct ena_comp_ctx *comp_ctx;
+ u32 completion_timeout;
u16 q_depth;
struct ena_com_admin_cq cq;
struct ena_com_admin_sq sq;
@@ -266,6 +266,7 @@ struct ena_com_mmio_read {
struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
dma_addr_t read_resp_dma_addr;
ena_mem_handle_t read_resp_mem_handle;
+ u32 reg_read_to; /* in us */
u16 seq_num;
bool readless_supported;
/* spin lock to ensure a single outstanding read */
@@ -316,6 +317,7 @@ struct ena_com_dev {
u8 __iomem *reg_bar;
void __iomem *mem_bar;
void *dmadev;
+ void *bus;
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
@@ -340,6 +342,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_device_attr_feature_desc dev_attr;
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
+ struct ena_admin_ena_hw_hints hw_hints;
};
struct ena_com_create_io_ctx {
@@ -379,7 +382,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
* @ena_dev: ENA communication layer struct
- * @realess_supported: readless mode (enable/disable)
+ * @readless_supported: readless mode (enable/disable)
*/
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
bool readless_supported);
@@ -421,14 +424,16 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
/* ena_com_dev_reset - Perform device FLR to the device.
* @ena_dev: ENA communication layer struct
+ * @reset_reason: Specify what is the trigger for the reset in case of an error.
*
* @return - 0 on success, negative value on failure.
*/
-int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct
- * ena_com_create_io_ctx - create context structure
+ * @ctx - create context structure
*
* Create the submission and the completion queues.
*
@@ -437,8 +442,9 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev);
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
struct ena_com_create_io_ctx *ctx);
-/* ena_com_admin_destroy - Destroy IO queue with the queue id - qid.
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
* @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
*/
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
@@ -581,9 +587,8 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
*
* @return: 0 on Success and negative value otherwise.
*/
-int
-ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx);
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
/* ena_com_get_dev_basic_stats - Get device basic statistics
* @ena_dev: ENA communication layer struct
@@ -608,9 +613,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
*
* @return: 0 on Success and negative value otherwise.
*/
-int
-ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload);
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload);
/* ena_com_rss_init - Init RSS
* @ena_dev: ENA communication layer struct
@@ -765,8 +769,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
*
* Retrieve the RSS indirection table from the device.
*
- * @note: If the caller called ena_com_indirect_table_fill_entry but didn't
- * flash it to the device, the new configuration will be lost.
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash
+ * it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
@@ -874,8 +878,7 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
* moderation table back to the default parameters.
* @ena_dev: ENA communication layer struct
*/
-void
-ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
* non-adaptive interval in Tx direction.
@@ -884,9 +887,8 @@ ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
*
* @return - 0 on success, negative value on failure.
*/
-int
-ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
- u32 tx_coalesce_usecs);
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs);
/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
* non-adaptive interval in Rx direction.
@@ -895,9 +897,8 @@ ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
*
* @return - 0 on success, negative value on failure.
*/
-int
-ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
- u32 rx_coalesce_usecs);
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs);
/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
* non-adaptive interval in Tx direction.
@@ -905,8 +906,7 @@ ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
*
* @return - interval in usec
*/
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
* non-adaptive interval in Rx direction.
@@ -914,8 +914,7 @@ ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
*
* @return - interval in usec
*/
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
* moderation table.
@@ -940,20 +939,17 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
-static inline bool
-ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
}
-static inline void
-ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
ena_dev->adaptive_coalescing = true;
}
-static inline void
-ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
ena_dev->adaptive_coalescing = false;
}
@@ -966,12 +962,11 @@ ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
 * @moder_tbl_idx: Current table level as input; updated to the new level on
 * return.
*/
-static inline void
-ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
- unsigned int pkts,
- unsigned int bytes,
- unsigned int *smoothed_interval,
- unsigned int *moder_tbl_idx)
+static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+ unsigned int pkts,
+ unsigned int bytes,
+ unsigned int *smoothed_interval,
+ unsigned int *moder_tbl_idx)
{
enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
struct ena_intr_moder_entry *curr_moder_entry;
@@ -1001,17 +996,20 @@ ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
if ((pkts > curr_moder_entry->pkts_per_interval) ||
(bytes > curr_moder_entry->bytes_per_interval))
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
} else {
- pred_moder_entry = &intr_moder_tbl[curr_moder_idx - 1];
+ pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
if ((pkts <= pred_moder_entry->pkts_per_interval) ||
(bytes <= pred_moder_entry->bytes_per_interval))
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx - 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
else if ((pkts > curr_moder_entry->pkts_per_interval) ||
(bytes > curr_moder_entry->bytes_per_interval)) {
if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
}
}
new_moder_entry = &intr_moder_tbl[new_moder_idx];
@@ -1044,18 +1042,12 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
intr_reg->intr_control |=
(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
- & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
if (unmask)
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
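A note on the hunk above: it fixes a genuine masking bug - the Tx delay
interval was ANDed with the Rx mask, so Tx values could spill into the wrong
bit field. A minimal usage sketch, assuming the full parameter list of
ena_com_update_intr_reg() (rx delay, tx delay, unmask flag) and illustrative
delay values:

	struct ena_eth_io_intr_reg intr_reg = {0};
	u32 rx_delay = 4;  /* illustrative, in intr_delay_resolution units */
	u32 tx_delay = 64; /* illustrative */

	/* Packs rx_delay into the RX field and tx_delay into the TX field
	 * (now using the correct TX mask), then sets the unmask bit.
	 */
	ena_com_update_intr_reg(&intr_reg, rx_delay, tx_delay, true);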
-int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
- u32 len);
-
-int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
- u32 funct_queue);
-
#if defined(__cplusplus)
}
#endif /* __cplusplus */
diff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
index 7a031d90..04d4e9a5 100644
--- a/drivers/net/ena/base/ena_defs/ena_admin_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
@@ -34,174 +34,140 @@
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
-/* admin commands opcodes */
enum ena_admin_aq_opcode {
- /* create submission queue */
- ENA_ADMIN_CREATE_SQ = 1,
+ ENA_ADMIN_CREATE_SQ = 1,
- /* destroy submission queue */
- ENA_ADMIN_DESTROY_SQ = 2,
+ ENA_ADMIN_DESTROY_SQ = 2,
- /* create completion queue */
- ENA_ADMIN_CREATE_CQ = 3,
+ ENA_ADMIN_CREATE_CQ = 3,
- /* destroy completion queue */
- ENA_ADMIN_DESTROY_CQ = 4,
+ ENA_ADMIN_DESTROY_CQ = 4,
- /* get capabilities of particular feature */
- ENA_ADMIN_GET_FEATURE = 8,
+ ENA_ADMIN_GET_FEATURE = 8,
- /* get capabilities of particular feature */
- ENA_ADMIN_SET_FEATURE = 9,
+ ENA_ADMIN_SET_FEATURE = 9,
- /* get statistics */
- ENA_ADMIN_GET_STATS = 11,
+ ENA_ADMIN_GET_STATS = 11,
};
-/* admin command completion status codes */
enum ena_admin_aq_completion_status {
- /* Request completed successfully */
- ENA_ADMIN_SUCCESS = 0,
+ ENA_ADMIN_SUCCESS = 0,
- /* no resources to satisfy request */
- ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
- /* Bad opcode in request descriptor */
- ENA_ADMIN_BAD_OPCODE = 2,
+ ENA_ADMIN_BAD_OPCODE = 2,
- /* Unsupported opcode in request descriptor */
- ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
- /* Wrong request format */
- ENA_ADMIN_MALFORMED_REQUEST = 4,
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
- /* One of parameters is not valid. Provided in ACQ entry
- * extended_status
- */
- ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+ /* Additional status is provided in ACQ entry extended_status */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
- /* unexpected error */
- ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
};
-/* get/set feature subcommands opcodes */
enum ena_admin_aq_feature_id {
- /* list of all supported attributes/capabilities in the ENA */
- ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
- /* max number of supported queues per for every queues type */
- ENA_ADMIN_MAX_QUEUES_NUM = 2,
+ ENA_ADMIN_HW_HINTS = 3,
- /* Receive Side Scaling (RSS) function */
- ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
- /* stateless TCP/UDP/IP offload capabilities. */
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
- /* Multiple tuples flow table configuration */
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
- /* max MTU, current MTU */
- ENA_ADMIN_MTU = 14,
+ ENA_ADMIN_MTU = 14,
- /* Receive Side Scaling (RSS) hash input */
- ENA_ADMIN_RSS_HASH_INPUT = 18,
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
- /* interrupt moderation parameters */
- ENA_ADMIN_INTERRUPT_MODERATION = 20,
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
- /* AENQ configuration */
- ENA_ADMIN_AENQ_CONFIG = 26,
+ ENA_ADMIN_AENQ_CONFIG = 26,
- /* Link configuration */
- ENA_ADMIN_LINK_CONFIG = 27,
+ ENA_ADMIN_LINK_CONFIG = 27,
- /* Host attributes configuration */
- ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
- /* Number of valid opcodes */
- ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
-/* descriptors and headers placement */
enum ena_admin_placement_policy_type {
- /* descriptors and headers are in OS memory */
- ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
- /* descriptors and headers in device memory (a.k.a Low Latency
+ /* descriptors and headers are in device memory (a.k.a Low Latency
* Queue)
*/
- ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
};
-/* link speeds */
enum ena_admin_link_types {
- ENA_ADMIN_LINK_SPEED_1G = 0x1,
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
- ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
- ENA_ADMIN_LINK_SPEED_5G = 0x4,
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
- ENA_ADMIN_LINK_SPEED_10G = 0x8,
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
- ENA_ADMIN_LINK_SPEED_25G = 0x10,
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
- ENA_ADMIN_LINK_SPEED_40G = 0x20,
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
- ENA_ADMIN_LINK_SPEED_50G = 0x40,
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
- ENA_ADMIN_LINK_SPEED_100G = 0x80,
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
- ENA_ADMIN_LINK_SPEED_200G = 0x100,
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
- ENA_ADMIN_LINK_SPEED_400G = 0x200,
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
};
-/* completion queue update policy */
enum ena_admin_completion_policy_type {
- /* cqe for each sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+ /* completion queue entry for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
- /* cqe upon request in sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+ /* completion queue entry upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
/* current queue head pointer is updated in OS memory upon sq
* descriptor request
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
/* current queue head pointer is updated in OS memory for each sq
* descriptor
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
};
-/* type of get statistics command */
+/* basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
enum ena_admin_get_stats_type {
- /* Basic statistics */
- ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
- /* Extended statistics */
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
};
-/* scope of get statistics command */
enum ena_admin_get_stats_scope {
- ENA_ADMIN_SPECIFIC_QUEUE = 0,
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
- ENA_ADMIN_ETH_TRAFFIC = 1,
+ ENA_ADMIN_ETH_TRAFFIC = 1,
};
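For reference, a hedged sketch of retrieving the basic counters through
ena_com_get_dev_basic_stats(), which is declared earlier in this header; the
wrapper name is hypothetical:

	/* Sketch: issues ENA_ADMIN_GET_STATS with
	 * type == ENA_ADMIN_GET_STATS_TYPE_BASIC under the hood.
	 */
	static int ena_fetch_basic_stats(struct ena_com_dev *ena_dev,
					 struct ena_admin_basic_stats *stats)
	{
		return ena_com_get_dev_basic_stats(ena_dev, stats);
	}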
-/* ENA Admin Queue (AQ) common descriptor */
struct ena_admin_aq_common_desc {
- /* word 0 : */
- /* command identificator to associate it with the completion
- * 11:0 : command_id
+ /* 11:0 : command_id
* 15:12 : reserved12
*/
uint16_t command_id;
- /* as appears in ena_aq_opcode */
+ /* as appears in ena_admin_aq_opcode */
uint8_t opcode;
/* 0 : phase
@@ -214,24 +180,17 @@ struct ena_admin_aq_common_desc {
uint8_t flags;
};
-/* used in ena_aq_entry. Can point directly to control data, or to a page
- * list chunk. Used also at the end of indirect mode page list chunks, for
- * chaining.
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
*/
struct ena_admin_ctrl_buff_info {
- /* word 0 : indicates length of the buffer pointed by
- * control_buffer_address.
- */
uint32_t length;
- /* words 1:2 : points to control buffer (direct or indirect) */
struct ena_common_mem_addr address;
};
-/* submission queue full identification */
struct ena_admin_sq {
- /* word 0 : */
- /* queue id */
uint16_t sq_idx;
/* 4:0 : reserved
@@ -242,36 +201,25 @@ struct ena_admin_sq {
uint8_t reserved1;
};
-/* AQ entry format */
struct ena_admin_aq_entry {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : */
union {
- /* command specific inline data */
uint32_t inline_data_w1[3];
- /* words 1:3 : points to control buffer (direct or
- * indirect, chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
} u;
- /* command specific inline data */
uint32_t inline_data_w4[12];
};
-/* ENA Admin Completion Queue (ACQ) common descriptor */
struct ena_admin_acq_common_desc {
- /* word 0 : */
/* command identifier to associate it with the aq descriptor
* 11:0 : command_id
* 15:12 : reserved12
*/
uint16_t command;
- /* status of request execution */
uint8_t status;
/* 0 : phase
@@ -279,33 +227,21 @@ struct ena_admin_acq_common_desc {
*/
uint8_t flags;
- /* word 1 : */
- /* provides additional info */
uint16_t extended_status;
- /* submission queue head index, serves as a hint what AQ entries can
- * be revoked
- */
+ /* serves as a hint what AQ entries can be revoked */
uint16_t sq_head_indx;
};
-/* ACQ entry format */
struct ena_admin_acq_entry {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_descriptor;
- /* response type specific data */
uint32_t response_specific_data[14];
};
-/* ENA AQ Create Submission Queue command. Placed in control buffer pointed
- * by AQ entry
- */
struct ena_admin_aq_create_sq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
/* 4:0 : reserved0_w1
* 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
*/
@@ -337,7 +273,6 @@ struct ena_admin_aq_create_sq_cmd {
*/
uint8_t sq_caps_3;
- /* word 2 : */
/* associated completion queue id. This CQ must be created prior to
* SQ creation
*/
@@ -346,85 +281,62 @@ struct ena_admin_aq_create_sq_cmd {
/* submission queue depth in entries */
uint16_t sq_depth;
- /* words 3:4 : SQ physical base address in OS memory. This field
- * should not be used for Low Latency queues. Has to be page
- * aligned.
+ /* SQ physical base address in OS memory. This field should not be
+ * used for Low Latency queues. Has to be page aligned.
*/
struct ena_common_mem_addr sq_ba;
- /* words 5:6 : specifies queue head writeback location in OS
- * memory. Valid if completion_policy is set to
- * completion_policy_head_on_demand or completion_policy_head. Has
- * to be cache aligned
+ /* specifies queue head writeback location in OS memory. Valid if
+ * completion_policy is set to completion_policy_head_on_demand or
+ * completion_policy_head. Has to be cache aligned
*/
struct ena_common_mem_addr sq_head_writeback;
- /* word 7 : reserved word */
uint32_t reserved0_w7;
- /* word 8 : reserved word */
uint32_t reserved0_w8;
};
-/* submission queue direction */
enum ena_admin_sq_direction {
- ENA_ADMIN_SQ_DIRECTION_TX = 1,
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
- ENA_ADMIN_SQ_DIRECTION_RX = 2,
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
};
-/* ENA Response for Create SQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_create_sq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* word 2 : */
- /* sq identifier */
uint16_t sq_idx;
uint16_t reserved;
- /* word 3 : queue doorbell address as an offset to PCIe MMIO REG BAR */
+ /* queue doorbell address as an offset to PCIe MMIO REG BAR */
uint32_t sq_doorbell_offset;
- /* word 4 : low latency queue ring base address as an offset to
- * PCIe MMIO LLQ_MEM BAR
+ /* low latency queue ring base address as an offset to PCIe MMIO
+ * LLQ_MEM BAR
*/
uint32_t llq_descriptors_offset;
- /* word 5 : low latency queue headers' memory as an offset to PCIe
- * MMIO LLQ_MEM BAR
+ /* low latency queue headers' memory as an offset to PCIe MMIO
+ * LLQ_MEM BAR
*/
uint32_t llq_headers_offset;
};
-/* ENA AQ Destroy Submission Queue command. Placed in control buffer
- * pointed by AQ entry
- */
struct ena_admin_aq_destroy_sq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1 : */
struct ena_admin_sq sq;
};
-/* ENA Response for Destroy SQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_destroy_sq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
};
-/* ENA AQ Create Completion Queue command */
struct ena_admin_aq_create_cq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
/* 4:0 : reserved5
* 5 : interrupt_mode_enabled - if set, cq operates
* in interrupt mode, otherwise - polling
@@ -441,62 +353,39 @@ struct ena_admin_aq_create_cq_cmd {
/* completion queue depth in # of entries. must be power of 2 */
uint16_t cq_depth;
- /* word 2 : msix vector assigned to this cq */
+ /* msix vector assigned to this cq */
uint32_t msix_vector;
- /* words 3:4 : cq physical base address in OS memory. CQ must be
- * physically contiguous
+ /* cq physical base address in OS memory. CQ must be physically
+ * contiguous
*/
struct ena_common_mem_addr cq_ba;
};
-/* ENA Response for Create CQ Command. Appears in ACQ entry as response
- * specific data
- */
struct ena_admin_acq_create_cq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* word 2 : */
- /* cq identifier */
uint16_t cq_idx;
- /* actual cq depth in # of entries */
+ /* actual cq depth in number of entries */
uint16_t cq_actual_depth;
- /* word 3 : cpu numa node address as an offset to PCIe MMIO REG BAR */
uint32_t numa_node_register_offset;
- /* word 4 : completion head doorbell address as an offset to PCIe
- * MMIO REG BAR
- */
uint32_t cq_head_db_register_offset;
- /* word 5 : interrupt unmask register address as an offset into
- * PCIe MMIO REG BAR
- */
uint32_t cq_interrupt_unmask_register_offset;
};
-/* ENA AQ Destroy Completion Queue command. Placed in control buffer
- * pointed by AQ entry
- */
struct ena_admin_aq_destroy_cq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
- /* associated queue id. */
uint16_t cq_idx;
uint16_t reserved1;
};
-/* ENA Response for Destroy CQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_destroy_cq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
};
@@ -504,21 +393,15 @@ struct ena_admin_acq_destroy_cq_resp_desc {
* buffer pointed by AQ entry
*/
struct ena_admin_aq_get_stats_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : */
union {
/* command specific inline data */
uint32_t inline_data_w1[3];
- /* words 1:3 : points to control buffer (direct or
- * indirect, chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
} u;
- /* word 4 : */
/* stats type as defined in enum ena_admin_get_stats_type */
uint8_t type;
@@ -527,7 +410,6 @@ struct ena_admin_aq_get_stats_cmd {
uint16_t reserved3;
- /* word 5 : */
/* queue id. used when scope is specific_queue */
uint16_t queue_idx;
@@ -539,89 +421,60 @@ struct ena_admin_aq_get_stats_cmd {
/* Basic Statistics Command. */
struct ena_admin_basic_stats {
- /* word 0 : */
uint32_t tx_bytes_low;
- /* word 1 : */
uint32_t tx_bytes_high;
- /* word 2 : */
uint32_t tx_pkts_low;
- /* word 3 : */
uint32_t tx_pkts_high;
- /* word 4 : */
uint32_t rx_bytes_low;
- /* word 5 : */
uint32_t rx_bytes_high;
- /* word 6 : */
uint32_t rx_pkts_low;
- /* word 7 : */
uint32_t rx_pkts_high;
- /* word 8 : */
uint32_t rx_drops_low;
- /* word 9 : */
uint32_t rx_drops_high;
};
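Each counter above is split into 32-bit halves; a small reassembly sketch
(the helper name is hypothetical):

	static inline uint64_t ena_stat64(uint32_t low, uint32_t high)
	{
		return ((uint64_t)high << 32) | low;
	}

	/* e.g.: uint64_t rx_drops = ena_stat64(s->rx_drops_low, s->rx_drops_high); */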
-/* ENA Response for Get Statistics Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_get_stats_resp {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:11 : */
struct ena_admin_basic_stats basic_stats;
};
-/* ENA Get/Set Feature common descriptor. Appears as inline word in
- * ena_aq_entry
- */
struct ena_admin_get_set_feature_common_desc {
- /* word 0 : */
/* 1:0 : select - 0x1 - current value; 0x3 - default
* value
* 7:3 : reserved3
*/
uint8_t flags;
- /* as appears in ena_feature_id */
+ /* as appears in ena_admin_aq_feature_id */
uint8_t feature_id;
- /* reserved16 */
uint16_t reserved16;
};
-/* ENA Device Attributes Feature descriptor. */
struct ena_admin_device_attr_feature_desc {
- /* word 0 : implementation id */
uint32_t impl_id;
- /* word 1 : device version */
uint32_t device_version;
- /* word 2 : bit map of which bits are supported value of 1
- * indicated that this feature is supported and can perform SET/GET
- * for it
- */
+ /* bitmap of ena_admin_aq_feature_id */
uint32_t supported_features;
- /* word 3 : */
uint32_t reserved3;
- /* word 4 : Indicates how many bits are used physical address
- * access.
- */
+	/* Indicates how many bits are used for physical address access. */
uint32_t phys_addr_width;
- /* word 5 : Indicates how many bits are used virtual address access. */
+	/* Indicates how many bits are used for virtual address access. */
uint32_t virt_addr_width;
/* unicast MAC address (in Network byte order) */
@@ -629,36 +482,27 @@ struct ena_admin_device_attr_feature_desc {
uint8_t reserved7[2];
- /* word 8 : Max supported MTU value */
uint32_t max_mtu;
};
-/* ENA Max Queues Feature descriptor. */
struct ena_admin_queue_feature_desc {
- /* word 0 : Max number of submission queues (including LLQs) */
+ /* including LLQs */
uint32_t max_sq_num;
- /* word 1 : Max submission queue depth */
uint32_t max_sq_depth;
- /* word 2 : Max number of completion queues */
uint32_t max_cq_num;
- /* word 3 : Max completion queue depth */
uint32_t max_cq_depth;
- /* word 4 : Max number of LLQ submission queues */
uint32_t max_llq_num;
- /* word 5 : Max submission queue depth of LLQ */
uint32_t max_llq_depth;
- /* word 6 : Max header size */
uint32_t max_header_size;
- /* word 7 : */
- /* Maximum Descriptors number, including meta descriptors, allowed
- * for a single Tx packet
+	/* Maximum number of descriptors, including the meta descriptor,
+	 * allowed for a single Tx packet
*/
uint16_t max_packet_tx_descs;
@@ -666,86 +510,69 @@ struct ena_admin_queue_feature_desc {
uint16_t max_packet_rx_descs;
};
-/* ENA MTU Set Feature descriptor. */
struct ena_admin_set_feature_mtu_desc {
- /* word 0 : mtu payload size (exclude L2) */
+ /* exclude L2 */
uint32_t mtu;
};
-/* ENA host attributes Set Feature descriptor. */
struct ena_admin_set_feature_host_attr_desc {
- /* words 0:1 : host OS info base address in OS memory. host info is
- * 4KB of physically contiguous
+	/* host OS info base address in OS memory. host info is 4KB of
+	 * physically contiguous memory
*/
struct ena_common_mem_addr os_info_ba;
- /* words 2:3 : host debug area base address in OS memory. debug
- * area must be physically contiguous
+ /* host debug area base address in OS memory. debug area must be
+ * physically contiguous
*/
struct ena_common_mem_addr debug_ba;
- /* word 4 : debug area size */
+ /* debug area size */
uint32_t debug_area_size;
};
-/* ENA Interrupt Moderation Get Feature descriptor. */
struct ena_admin_feature_intr_moder_desc {
- /* word 0 : */
/* interrupt delay granularity in usec */
uint16_t intr_delay_resolution;
uint16_t reserved;
};
-/* ENA Link Get Feature descriptor. */
struct ena_admin_get_feature_link_desc {
- /* word 0 : Link speed in Mb */
+ /* Link speed in Mb */
uint32_t speed;
- /* word 1 : supported speeds (bit field of enum ena_admin_link
- * types)
- */
+	/* bit field of enum ena_admin_link_types */
uint32_t supported;
- /* word 2 : */
- /* 0 : autoneg - auto negotiation
+ /* 0 : autoneg
* 1 : duplex - Full Duplex
* 31:2 : reserved2
*/
uint32_t flags;
};
-/* ENA AENQ Feature descriptor. */
struct ena_admin_feature_aenq_desc {
- /* word 0 : bitmask for AENQ groups the device can report */
+ /* bitmask for AENQ groups the device can report */
uint32_t supported_groups;
- /* word 1 : bitmask for AENQ groups to report */
+ /* bitmask for AENQ groups to report */
uint32_t enabled_groups;
};
-/* ENA Stateless Offload Feature descriptor. */
struct ena_admin_feature_offload_desc {
- /* word 0 : */
- /* Trasmit side stateless offload
- * 0 : TX_L3_csum_ipv4 - IPv4 checksum
- * 1 : TX_L4_ipv4_csum_part - TCP/UDP over IPv4
- * checksum, the checksum field should be initialized
- * with pseudo header checksum
- * 2 : TX_L4_ipv4_csum_full - TCP/UDP over IPv4
- * checksum
- * 3 : TX_L4_ipv6_csum_part - TCP/UDP over IPv6
- * checksum, the checksum field should be initialized
- * with pseudo header checksum
- * 4 : TX_L4_ipv6_csum_full - TCP/UDP over IPv6
- * checksum
- * 5 : tso_ipv4 - TCP/IPv4 Segmentation Offloading
- * 6 : tso_ipv6 - TCP/IPv6 Segmentation Offloading
- * 7 : tso_ecn - TCP Segmentation with ECN
+ /* 0 : TX_L3_csum_ipv4
+ * 1 : TX_L4_ipv4_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full
+ * 3 : TX_L4_ipv6_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full
+ * 5 : tso_ipv4
+ * 6 : tso_ipv6
+ * 7 : tso_ecn
*/
uint32_t tx;
- /* word 1 : */
/* Receive side supported stateless offload
* 0 : RX_L3_csum_ipv4 - IPv4 checksum
* 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
@@ -754,118 +581,94 @@ struct ena_admin_feature_offload_desc {
*/
uint32_t rx_supported;
- /* word 2 : */
- /* Receive side enabled stateless offload */
uint32_t rx_enabled;
};
-/* hash functions */
enum ena_admin_hash_functions {
- /* Toeplitz hash */
- ENA_ADMIN_TOEPLITZ = 1,
+ ENA_ADMIN_TOEPLITZ = 1,
- /* CRC32 hash */
- ENA_ADMIN_CRC32 = 2,
+ ENA_ADMIN_CRC32 = 2,
};
-/* ENA RSS flow hash control buffer structure */
struct ena_admin_feature_rss_flow_hash_control {
- /* word 0 : number of valid keys */
uint32_t keys_num;
- /* word 1 : */
uint32_t reserved;
- /* Toeplitz keys */
uint32_t key[10];
};
-/* ENA RSS Flow Hash Function */
struct ena_admin_feature_rss_flow_hash_function {
- /* word 0 : */
- /* supported hash functions
- * 7:0 : funcs - supported hash functions (bitmask
- * accroding to ena_admin_hash_functions)
- */
+ /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
uint32_t supported_func;
- /* word 1 : */
- /* selected hash func
- * 7:0 : selected_func - selected hash function
- * (bitmask accroding to ena_admin_hash_functions)
+ /* 7:0 : selected_func - bitmask of
+ * ena_admin_hash_functions
*/
uint32_t selected_func;
- /* word 2 : initial value */
+ /* initial value */
uint32_t init_val;
};
/* RSS flow hash protocols */
enum ena_admin_flow_hash_proto {
- /* tcp/ipv4 */
- ENA_ADMIN_RSS_TCP4 = 0,
+ ENA_ADMIN_RSS_TCP4 = 0,
- /* udp/ipv4 */
- ENA_ADMIN_RSS_UDP4 = 1,
+ ENA_ADMIN_RSS_UDP4 = 1,
- /* tcp/ipv6 */
- ENA_ADMIN_RSS_TCP6 = 2,
+ ENA_ADMIN_RSS_TCP6 = 2,
- /* udp/ipv6 */
- ENA_ADMIN_RSS_UDP6 = 3,
+ ENA_ADMIN_RSS_UDP6 = 3,
- /* ipv4 not tcp/udp */
- ENA_ADMIN_RSS_IP4 = 4,
+ ENA_ADMIN_RSS_IP4 = 4,
- /* ipv6 not tcp/udp */
- ENA_ADMIN_RSS_IP6 = 5,
+ ENA_ADMIN_RSS_IP6 = 5,
- /* fragmented ipv4 */
- ENA_ADMIN_RSS_IP4_FRAG = 6,
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
- /* not ipv4/6 */
- ENA_ADMIN_RSS_NOT_IP = 7,
+ ENA_ADMIN_RSS_NOT_IP = 7,
- /* max number of protocols */
- ENA_ADMIN_RSS_PROTO_NUM = 16,
+ /* TCPv6 with extension header */
+ ENA_ADMIN_RSS_TCP6_EX = 8,
+
+ /* IPv6 with extension header */
+ ENA_ADMIN_RSS_IP6_EX = 9,
+
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
};
/* RSS flow hash fields */
enum ena_admin_flow_hash_fields {
/* Ethernet Dest Addr */
- ENA_ADMIN_RSS_L2_DA = 0,
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
/* Ethernet Src Addr */
- ENA_ADMIN_RSS_L2_SA = 1,
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
/* ipv4/6 Dest Addr */
- ENA_ADMIN_RSS_L3_DA = 2,
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
/* ipv4/6 Src Addr */
- ENA_ADMIN_RSS_L3_SA = 5,
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
/* tcp/udp Dest Port */
- ENA_ADMIN_RSS_L4_DP = 6,
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
/* tcp/udp Src Port */
- ENA_ADMIN_RSS_L4_SP = 7,
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
};
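With the enum converted to BIT() flags, selections can be OR-ed straight into
the fields member of ena_admin_proto_input (defined just below). A hedged
sketch selecting a 4-tuple hash for TCP/IPv4:

	struct ena_admin_proto_input tcp4_input = {
		.fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
			  ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP,
	};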
-/* hash input fields for flow protocol */
struct ena_admin_proto_input {
- /* word 0 : */
/* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
uint16_t fields;
uint16_t reserved2;
};
-/* ENA RSS hash control buffer structure */
struct ena_admin_feature_rss_hash_control {
- /* supported input fields */
struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
- /* selected input fields */
struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
@@ -873,11 +676,9 @@ struct ena_admin_feature_rss_hash_control {
struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
};
-/* ENA RSS flow hash input */
struct ena_admin_feature_rss_flow_hash_input {
- /* word 0 : */
/* supported hash input sorting
- * 1 : L3_sort - support swap L3 addresses if DA
+ * 1 : L3_sort - support swap L3 addresses if DA is
* smaller than SA
 * 2 : L4_sort - support swap L4 ports if DP is
 * smaller than SP
@@ -893,46 +694,37 @@ struct ena_admin_feature_rss_flow_hash_input {
uint16_t enabled_input_sort;
};
-/* Operating system type */
enum ena_admin_os_type {
- /* Linux OS */
- ENA_ADMIN_OS_LINUX = 1,
+ ENA_ADMIN_OS_LINUX = 1,
- /* Windows OS */
- ENA_ADMIN_OS_WIN = 2,
+ ENA_ADMIN_OS_WIN = 2,
- /* DPDK OS */
- ENA_ADMIN_OS_DPDK = 3,
+ ENA_ADMIN_OS_DPDK = 3,
- /* FreeBSD OS */
- ENA_ADMIN_OS_FREEBSD = 4,
+ ENA_ADMIN_OS_FREEBSD = 4,
- /* PXE OS */
- ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_IPXE = 5,
};
-/* host info */
struct ena_admin_host_info {
- /* word 0 : OS type defined in enum ena_os_type */
+ /* defined in enum ena_admin_os_type */
uint32_t os_type;
/* os distribution string format */
uint8_t os_dist_str[128];
- /* word 33 : OS distribution numeric format */
+ /* OS distribution numeric format */
uint32_t os_dist;
/* kernel version string format */
uint8_t kernel_ver_str[32];
- /* word 42 : Kernel version numeric format */
+ /* Kernel version numeric format */
uint32_t kernel_ver;
- /* word 43 : */
- /* driver version
- * 7:0 : major - major
- * 15:8 : minor - minor
- * 23:16 : sub_minor - sub minor
+ /* 7:0 : major
+ * 15:8 : minor
+ * 23:16 : sub_minor
*/
uint32_t driver_version;
@@ -940,220 +732,200 @@ struct ena_admin_host_info {
uint32_t supported_network_features[4];
};
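driver_version packs three byte-wide components into a single word, per the
bit layout in the comment above; a sketch with a hypothetical macro name:

	#define ENA_DRV_VER(major, minor, sub_minor)	\
		(((uint32_t)(major)) |			\
		 ((uint32_t)(minor) << 8) |		\
		 ((uint32_t)(sub_minor) << 16))

	/* e.g.: host_info->driver_version = ENA_DRV_VER(1, 1, 0); */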
-/* ENA RSS indirection table entry */
struct ena_admin_rss_ind_table_entry {
- /* word 0 : */
- /* cq identifier */
uint16_t cq_idx;
uint16_t reserved;
};
-/* ENA RSS indirection table */
struct ena_admin_feature_rss_ind_table {
- /* word 0 : */
/* min supported table size (2^min_size) */
uint16_t min_size;
/* max supported table size (2^max_size) */
uint16_t max_size;
- /* word 1 : */
/* table size (2^size) */
uint16_t size;
uint16_t reserved;
- /* word 2 : index of the inline entry. 0xFFFFFFFF means invalid */
+ /* index of the inline entry. 0xFFFFFFFF means invalid */
uint32_t inline_index;
- /* words 3 : used for updating single entry, ignored when setting
- * the entire table through the control buffer.
+ /* used for updating single entry, ignored when setting the entire
+ * table through the control buffer.
*/
struct ena_admin_rss_ind_table_entry inline_entry;
};
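The size fields are log2 exponents, so the actual entry counts are powers of
two. A short sketch, assuming tbl points at a descriptor returned by the
device:

	uint32_t min_entries = 1u << tbl->min_size;
	uint32_t max_entries = 1u << tbl->max_size;
	uint32_t cur_entries = 1u << tbl->size;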
-/* ENA Get Feature command */
+/* When hint value is 0, driver should use its own predefined value */
+struct ena_admin_ena_hw_hints {
+ /* value in ms */
+ uint16_t mmio_read_timeout;
+
+ /* value in ms */
+ uint16_t driver_watchdog_timeout;
+
+ /* Per packet tx completion timeout. value in ms */
+ uint16_t missing_tx_completion_timeout;
+
+ uint16_t missed_tx_completion_count_threshold_to_reset;
+
+ /* value in ms */
+ uint16_t admin_completion_tx_timeout;
+
+ uint16_t netdev_wd_timeout;
+
+ uint16_t max_tx_sgl_size;
+
+ uint16_t max_rx_sgl_size;
+
+ uint16_t reserved[8];
+};
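Per the comment above, a zero hint means "fall back to the driver's own
default". A minimal sketch of that convention; the helper and the default
constant are hypothetical:

	static inline uint16_t ena_hint_or(uint16_t hint, uint16_t fallback)
	{
		return hint ? hint : fallback;
	}

	/* e.g.: wd_ms = ena_hint_or(hints->driver_watchdog_timeout,
	 *                           DEFAULT_WD_TIMEOUT_MS);
	 */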
+
struct ena_admin_get_feat_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : points to control buffer (direct or indirect,
- * chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
- /* words 4 : */
struct ena_admin_get_set_feature_common_desc feat_common;
- /* words 5:15 : */
- union {
- /* raw words */
- uint32_t raw[11];
- } u;
+ uint32_t raw[11];
};
-/* ENA Get Feature command response */
struct ena_admin_get_feat_resp {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:15 : */
union {
- /* raw words */
uint32_t raw[14];
- /* words 2:10 : Get Device Attributes */
struct ena_admin_device_attr_feature_desc dev_attr;
- /* words 2:5 : Max queues num */
struct ena_admin_queue_feature_desc max_queue;
- /* words 2:3 : AENQ configuration */
struct ena_admin_feature_aenq_desc aenq;
- /* words 2:4 : Get Link configuration */
struct ena_admin_get_feature_link_desc link;
- /* words 2:4 : offload configuration */
struct ena_admin_feature_offload_desc offload;
- /* words 2:4 : rss flow hash function */
struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
- /* words 2 : rss flow hash input */
struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
- /* words 2:3 : rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
- /* words 2 : interrupt moderation configuration */
struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+ struct ena_admin_ena_hw_hints hw_hints;
} u;
};
-/* ENA Set Feature command */
struct ena_admin_set_feat_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : points to control buffer (direct or indirect,
- * chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
- /* words 4 : */
struct ena_admin_get_set_feature_common_desc feat_common;
- /* words 5:15 : */
union {
- /* raw words */
uint32_t raw[11];
- /* words 5 : mtu size */
+ /* mtu size */
struct ena_admin_set_feature_mtu_desc mtu;
- /* words 5:7 : host attributes */
+ /* host attributes */
struct ena_admin_set_feature_host_attr_desc host_attr;
- /* words 5:6 : AENQ configuration */
+ /* AENQ configuration */
struct ena_admin_feature_aenq_desc aenq;
- /* words 5:7 : rss flow hash function */
+ /* rss flow hash function */
struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
- /* words 5 : rss flow hash input */
+ /* rss flow hash input */
struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
- /* words 5:6 : rss indirection table */
+ /* rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
} u;
};
-/* ENA Set Feature command response */
struct ena_admin_set_feat_resp {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:15 : */
union {
- /* raw words */
uint32_t raw[14];
} u;
};
-/* ENA Asynchronous Event Notification Queue descriptor. */
struct ena_admin_aenq_common_desc {
- /* word 0 : */
uint16_t group;
uint16_t syndrom;
- /* word 1 : */
/* 0 : phase */
uint8_t flags;
uint8_t reserved1[3];
- /* word 2 : Timestamp LSB */
uint32_t timestamp_low;
- /* word 3 : Timestamp MSB */
uint32_t timestamp_high;
};
/* asynchronous event notification groups */
enum ena_admin_aenq_group {
- /* Link State Change */
- ENA_ADMIN_LINK_CHANGE = 0,
+ ENA_ADMIN_LINK_CHANGE = 0,
- ENA_ADMIN_FATAL_ERROR = 1,
+ ENA_ADMIN_FATAL_ERROR = 1,
- ENA_ADMIN_WARNING = 2,
+ ENA_ADMIN_WARNING = 2,
- ENA_ADMIN_NOTIFICATION = 3,
+ ENA_ADMIN_NOTIFICATION = 3,
- ENA_ADMIN_KEEP_ALIVE = 4,
+ ENA_ADMIN_KEEP_ALIVE = 4,
- ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-/* syndorm of AENQ notification group */
enum ena_admin_aenq_notification_syndrom {
- ENA_ADMIN_SUSPEND = 0,
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
- ENA_ADMIN_RESUME = 1,
+ ENA_ADMIN_UPDATE_HINTS = 2,
};
-/* ENA Asynchronous Event Notification generic descriptor. */
struct ena_admin_aenq_entry {
- /* words 0:3 : */
struct ena_admin_aenq_common_desc aenq_common_desc;
/* command specific inline data */
uint32_t inline_data_w4[12];
};
-/* ENA Asynchronous Event Notification Queue Link Change descriptor. */
struct ena_admin_aenq_link_change_desc {
- /* words 0:3 : */
struct ena_admin_aenq_common_desc aenq_common_desc;
- /* word 4 : */
/* 0 : link_status */
uint32_t flags;
};
-/* ENA MMIO Readless response interface */
+struct ena_admin_aenq_keep_alive_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ uint32_t rx_drops_low;
+
+ uint32_t rx_drops_high;
+};
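The keep-alive AENQ entry now carries rx drop counters. A hedged handler
sketch, assuming the usual AENQ callback shape (context pointer plus generic
entry); the handler name is hypothetical:

	static void ena_keep_alive_cb(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
	{
		struct ena_admin_aenq_keep_alive_desc *desc =
			(struct ena_admin_aenq_keep_alive_desc *)aenq_e;
		uint64_t rx_drops = ((uint64_t)desc->rx_drops_high << 32) |
				    desc->rx_drops_low;

		(void)adapter_data;
		(void)rx_drops; /* e.g. accumulate into the PMD's drop counter */
	}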
+
struct ena_admin_ena_mmio_req_read_less_resp {
- /* word 0 : */
- /* request id */
uint16_t req_id;
- /* register offset */
uint16_t reg_off;
- /* word 1 : value is valid when poll is cleared */
+ /* value is valid when poll is cleared */
uint32_t reg_val;
};
@@ -1220,8 +992,7 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* feature_rss_flow_hash_function */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \
- GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
/* feature_rss_flow_hash_input */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
@@ -1247,653 +1018,392 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
#if !defined(ENA_DEFS_LINUX_MAINLINE)
-static inline uint16_t
-get_ena_admin_aq_common_desc_command_id(
- const struct ena_admin_aq_common_desc *p)
+static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p)
{
return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline void
-set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p,
- uint16_t val)
+static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val)
{
p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
{
return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_common_desc_ctrl_data(
- const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p)
{
- return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >>
- ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
}
-static inline void
-set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val)
{
- p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT)
- & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_common_desc_ctrl_data_indirect(
- const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p)
{
- return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK)
- >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
}
-static inline void
-set_ena_admin_aq_common_desc_ctrl_data_indirect(
- struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val)
{
- p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT)
- & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
}
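All of these generated accessors reduce to the same mask-and-shift pattern,
and the setters OR into the field rather than overwrite it, so descriptors
are expected to start out zeroed. A tiny sketch using the pair above:

	struct ena_admin_aq_common_desc desc = {0}; /* setters OR bits in */

	set_ena_admin_aq_common_desc_ctrl_data_indirect(&desc, 1);
	/* the getter shifts the field back down: returns 1 here */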
-static inline uint8_t
-get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
{
- return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK)
- >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+ return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
}
-static inline void
-set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
{
- p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
- ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+ p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
}
-static inline uint16_t
-get_ena_admin_acq_common_desc_command_id(
- const struct ena_admin_acq_common_desc *p)
+static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p)
{
return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline void
-set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p,
- uint16_t val)
+static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val)
{
p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline uint8_t
-get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
{
return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_sq_direction(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p)
{
- return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK)
- >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+ return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_sq_direction(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_identity |= (val <<
- ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT)
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+ p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_placement_policy(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p)
{
return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_placement_policy(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_completion_policy(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p)
{
- return (p->sq_caps_2
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK)
- >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+ return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_completion_policy(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_caps_2 |=
- (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT)
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+ p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p)
{
- return p->sq_caps_3 &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+ return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_caps_3 |= val &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+ p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
- const struct ena_admin_aq_create_cq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p)
{
- return (p->cq_caps_1 &
- ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK)
- >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+ return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
- struct ena_admin_aq_create_cq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
{
- p->cq_caps_1 |=
- (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT)
- & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+ p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
- const struct ena_admin_aq_create_cq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p)
{
- return p->cq_caps_2
- & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
}
-static inline void
-set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
- struct ena_admin_aq_create_cq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
{
- p->cq_caps_2 |=
- val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
}
-static inline uint8_t
-get_ena_admin_get_set_feature_common_desc_select(
- const struct ena_admin_get_set_feature_common_desc *p)
+static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p)
{
return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
}
-static inline void
-set_ena_admin_get_set_feature_common_desc_select(
- struct ena_admin_get_set_feature_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
}
-static inline uint32_t
-get_ena_admin_get_feature_link_desc_autoneg(
- const struct ena_admin_get_feature_link_desc *p)
+static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p)
{
return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
}
-static inline void
-set_ena_admin_get_feature_link_desc_autoneg(
- struct ena_admin_get_feature_link_desc *p,
- uint32_t val)
+static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val)
{
p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
}
-static inline uint32_t
-get_ena_admin_get_feature_link_desc_duplex(
- const struct ena_admin_get_feature_link_desc *p)
+static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p)
{
- return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK)
- >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+ return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
}
-static inline void
-set_ena_admin_get_feature_link_desc_duplex(
- struct ena_admin_get_feature_link_desc *p,
- uint32_t val)
+static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val)
{
- p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT)
- & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+ p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
{
return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ipv6(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ipv6(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ecn(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ecn(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
{
- return p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+ return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+ p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_hash(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_hash(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_rss_flow_hash_function_funcs(
- const struct ena_admin_feature_rss_flow_hash_function *p)
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p)
{
- return p->supported_func &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+ return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_function_funcs(
- struct ena_admin_feature_rss_flow_hash_function *p,
- uint32_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
{
- p->supported_func |=
- val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+ p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_rss_flow_hash_function_selected_func(
- const struct ena_admin_feature_rss_flow_hash_function *p)
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p)
{
- return p->selected_func &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+ return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_function_selected_func(
- struct ena_admin_feature_rss_flow_hash_function *p,
- uint32_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
{
- p->selected_func |=
- val &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+ p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_L3_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->supported_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_L3_sort(
- struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->supported_input_sort |=
- (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_L4_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->supported_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_L4_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->supported_input_sort |=
- (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->enabled_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->enabled_input_sort |=
- (val <<
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->enabled_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->enabled_input_sort |=
- (val <<
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
{
return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
}
-static inline void
-set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
{
p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
{
- return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK)
- >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
}
-static inline void
-set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
{
- p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT)
- & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
{
- return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK)
- >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
}
-static inline void
-set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
{
- p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT)
- & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
}
-static inline uint8_t
-get_ena_admin_aenq_common_desc_phase(
- const struct ena_admin_aenq_common_desc *p)
+static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
{
return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_aenq_common_desc_phase(
- struct ena_admin_aenq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
}
-static inline uint32_t
-get_ena_admin_aenq_link_change_desc_link_status(
- const struct ena_admin_aenq_link_change_desc *p)
+static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p)
{
return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
}
-static inline void
-set_ena_admin_aenq_link_change_desc_link_status(
- struct ena_admin_aenq_link_change_desc *p,
- uint32_t val)
+static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val)
{
p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
}
diff --git a/drivers/net/ena/base/ena_defs/ena_common_defs.h b/drivers/net/ena/base/ena_defs/ena_common_defs.h
index 95e0f389..072e6c1f 100644
--- a/drivers/net/ena/base/ena_defs/ena_common_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_common_defs.h
@@ -34,17 +34,13 @@
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
-/* spec version */
-#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* spec version major */
-#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* spec version minor */
+#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
+#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
struct ena_common_mem_addr {
- /* word 0 : low 32 bit of the memory address */
uint32_t mem_addr_low;
- /* word 1 : */
- /* high 16 bits of the memory address */
uint16_t mem_addr_high;
/* MBZ */
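
The trimmed comments above still pin down the layout: a 48-bit address split into a 32-bit low word and a 16-bit high word. A hedged sketch of packing a 64-bit DMA address into that shape (the struct mirrors the one above; the helper and its bounds check are illustrative):

#include <stdint.h>

struct demo_mem_addr {
        uint32_t mem_addr_low;   /* address bits [31:0]  */
        uint16_t mem_addr_high;  /* address bits [47:32] */
};

/* Returns -1 if the address needs more than the 48 bits ENA supports. */
static int demo_mem_addr_set(struct demo_mem_addr *a, uint64_t addr)
{
        if (addr >> 48)
                return -1;
        a->mem_addr_low  = (uint32_t)addr;
        a->mem_addr_high = (uint16_t)(addr >> 32);
        return 0;
}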
diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
index 6bc3d6a7..4cf0b205 100644
--- a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
@@ -34,35 +34,30 @@
#ifndef _ENA_ETH_IO_H_
#define _ENA_ETH_IO_H_
-/* Layer 3 protocol index */
enum ena_eth_io_l3_proto_index {
- ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
- ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
- ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
- ENA_ETH_IO_L3_PROTO_FCOE = 21,
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
- ENA_ETH_IO_L3_PROTO_ROCE = 22,
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
};
-/* Layer 4 protocol index */
enum ena_eth_io_l4_proto_index {
- ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
- ENA_ETH_IO_L4_PROTO_TCP = 12,
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
- ENA_ETH_IO_L4_PROTO_UDP = 13,
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
- ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
};
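
These two enums are the values a driver writes into the Tx descriptor's l3_proto_idx and l4_proto_idx fields. A rough illustration of the mapping, assuming the packet has already been parsed (the classifier itself is hypothetical; the numeric values come from the enums above):

enum demo_l3 { DEMO_L3_UNKNOWN = 0, DEMO_L3_IPV4 = 8, DEMO_L3_IPV6 = 11 };
enum demo_l4 { DEMO_L4_UNKNOWN = 0, DEMO_L4_TCP = 12, DEMO_L4_UDP = 13 };

static void demo_pick_proto(int is_ipv4, int is_ipv6, int is_tcp, int is_udp,
                            enum demo_l3 *l3, enum demo_l4 *l4)
{
        *l3 = is_ipv4 ? DEMO_L3_IPV4 : is_ipv6 ? DEMO_L3_IPV6 : DEMO_L3_UNKNOWN;
        *l4 = is_tcp  ? DEMO_L4_TCP  : is_udp  ? DEMO_L4_UDP  : DEMO_L4_UNKNOWN;
}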
-/* ENA IO Queue Tx descriptor */
struct ena_eth_io_tx_desc {
- /* word 0 : */
- /* length, request id and control flags
- * 15:0 : length - Buffer length in bytes, must
+ /* 15:0 : length - Buffer length in bytes, must
* include any packet trailers that the ENA supposed
* to update like End-to-End CRC, Authentication GMAC
* etc. This length must not include the
@@ -85,9 +80,7 @@ struct ena_eth_io_tx_desc {
*/
uint32_t len_ctrl;
- /* word 1 : */
- /* ethernet control
- * 3:0 : l3_proto_idx - L3 protocol. This field
+ /* 3:0 : l3_proto_idx - L3 protocol. This field
* required when l3_csum_en,l3_csum or tso_en are set.
* 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and
* DF flags of the IPv4 header is 0. Otherwise must
@@ -119,10 +112,8 @@ struct ena_eth_io_tx_desc {
*/
uint32_t meta_ctrl;
- /* word 2 : Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 3 : */
/* address high and header size
* 15:0 : addr_hi - Buffer Pointer[47:32]
* 23:16 : reserved16_w2
@@ -141,20 +132,16 @@ struct ena_eth_io_tx_desc {
uint32_t buff_addr_hi_hdr_sz;
};
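
Words 2 and 3 together carry a 48-bit buffer pointer: buff_addr_lo holds bits [31:0], and per the comment above the low half of buff_addr_hi_hdr_sz holds bits [47:32]. A sketch of filling both (trimmed stand-in struct; a real descriptor also sets the header-size bits in the upper half of word 3):

#include <stdint.h>

struct demo_tx_desc {
        uint32_t buff_addr_lo;        /* buffer address bits [31:0] */
        uint32_t buff_addr_hi_hdr_sz; /* 15:0 : addr_hi, upper bits: header size */
};

static void demo_tx_desc_set_addr(struct demo_tx_desc *d, uint64_t dma)
{
        d->buff_addr_lo = (uint32_t)dma;
        /* OR into bits 15:0 only, leaving the header-size bits untouched. */
        d->buff_addr_hi_hdr_sz |= (uint32_t)((dma >> 32) & 0xFFFF);
}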
-/* ENA IO Queue Tx Meta descriptor */
struct ena_eth_io_tx_meta_desc {
- /* word 0 : */
- /* length, request id and control flags
- * 9:0 : req_id_lo - Request ID[9:0]
+ /* 9:0 : req_id_lo - Request ID[9:0]
* 11:10 : reserved10 - MBZ
* 12 : reserved12 - MBZ
* 13 : reserved13 - MBZ
* 14 : ext_valid - if set, offset fields in Word2
- * are valid Also MSS High in Word 0 and Outer L3
- * Offset High in WORD 0 and bits [31:24] in Word 3
- * 15 : word3_valid - If set Crypto Info[23:0] of
- * Word 3 is valid
- * 19:16 : mss_hi_ptp
+ * are valid Also MSS High in Word 0 and bits [31:24]
+ * in Word 3
+ * 15 : reserved15
+ * 19:16 : mss_hi
* 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
* Extended Metadata Descriptor
* 21 : meta_store - Store extended metadata in queue
@@ -175,19 +162,13 @@ struct ena_eth_io_tx_meta_desc {
*/
uint32_t len_ctrl;
- /* word 1 : */
- /* word 1
- * 5:0 : req_id_hi
+ /* 5:0 : req_id_hi
* 31:6 : reserved6 - MBZ
*/
uint32_t word1;
- /* word 2 : */
- /* word 2
- * 7:0 : l3_hdr_len - the header length L3 IP header.
- * 15:8 : l3_hdr_off - the offset of the first byte
- * in the L3 header from the beginning of the to-be
- * transmitted packet.
+ /* 7:0 : l3_hdr_len
+ * 15:8 : l3_hdr_off
* 21:16 : l4_hdr_len_in_words - counts the L4 header
* length in words. there is an explicit assumption
* that L4 header appears right after L3 header and
@@ -196,13 +177,10 @@ struct ena_eth_io_tx_meta_desc {
*/
uint32_t word2;
- /* word 3 : */
uint32_t reserved;
};
-/* ENA IO Queue Tx completions descriptor */
struct ena_eth_io_tx_cdesc {
- /* word 0 : */
/* Request ID[15:0] */
uint16_t req_id;
@@ -214,24 +192,19 @@ struct ena_eth_io_tx_cdesc {
*/
uint8_t flags;
- /* word 1 : */
uint16_t sub_qid;
- /* indicates location of submission queue head */
uint16_t sq_head_idx;
};
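
The phase bit in flags is how the driver tells fresh completions from stale ring entries: the expected phase flips on every queue wrap, and an entry is valid only once its phase matches. A hedged polling sketch (bookkeeping simplified; in the real header the bit is ENA_ETH_IO_TX_CDESC_PHASE_MASK, and a production driver also needs a read barrier after the phase check):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_TX_CDESC_PHASE_MASK 0x1 /* stand-in for ENA_ETH_IO_TX_CDESC_PHASE_MASK */

struct demo_tx_cdesc { uint16_t req_id; uint8_t status; uint8_t flags; };

/* Returns true and reports req_id when the entry at *head is valid. */
static bool demo_tx_cq_poll(const struct demo_tx_cdesc *ring, uint16_t size,
                            uint16_t *head, uint8_t *expected_phase,
                            uint16_t *req_id)
{
        const struct demo_tx_cdesc *cdesc = &ring[*head];

        if ((cdesc->flags & DEMO_TX_CDESC_PHASE_MASK) != *expected_phase)
                return false; /* device has not written this entry yet */

        *req_id = cdesc->req_id;
        if (++(*head) == size) { /* wrap: expected phase flips */
                *head = 0;
                *expected_phase ^= 1;
        }
        return true;
}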
-/* ENA IO Queue Rx descriptor */
struct ena_eth_io_rx_desc {
- /* word 0 : */
/* In bytes. 0 means 64KB */
uint16_t length;
/* MBZ */
uint8_t reserved2;
- /* control flags
- * 0 : phase
+ /* 0 : phase
* 1 : reserved1 - MBZ
* 2 : first - Indicates first descriptor in
* transaction
@@ -242,32 +215,27 @@ struct ena_eth_io_rx_desc {
*/
uint8_t ctrl;
- /* word 1 : */
uint16_t req_id;
/* MBZ */
uint16_t reserved6;
- /* word 2 : Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 3 : */
- /* Buffer Address bits[47:16] */
uint16_t buff_addr_hi;
/* MBZ */
uint16_t reserved16_w3;
};
-/* ENA IO Queue Rx Completion Base Descriptor (4-word format). Note: all
- * ethernet parsing information are valid only when last=1
+/* 4-word format Note: all ethernet parsing information are valid only when
+ * last=1
*/
struct ena_eth_io_rx_cdesc_base {
- /* word 0 : */
- /* 4:0 : l3_proto_idx - L3 protocol index
- * 6:5 : src_vlan_cnt - Source VLAN count
+ /* 4:0 : l3_proto_idx
+ * 6:5 : src_vlan_cnt
* 7 : reserved7 - MBZ
- * 12:8 : l4_proto_idx - L4 protocol index
+ * 12:8 : l4_proto_idx
* 13 : l3_csum_err - when set, either the L3
* checksum error detected, or, the controller didn't
* validate the checksum. This bit is valid only when
@@ -292,56 +260,43 @@ struct ena_eth_io_rx_cdesc_base {
*/
uint32_t status;
- /* word 1 : */
uint16_t length;
uint16_t req_id;
- /* word 2 : 32-bit hash result */
+ /* 32-bit hash result */
uint32_t hash;
- /* word 3 : */
- /* submission queue number */
uint16_t sub_qid;
uint16_t reserved;
};
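
As the reworded comment notes, the parsing fields in status are only meaningful when last = 1, and l3_csum_err being set can also mean the checksum was simply not validated. A sketch of gating checksum handling on both conditions (bit 13 for l3_csum_err comes from the comment above; the other two bit positions are assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_RX_L3_CSUM_ERR_MASK (1u << 13) /* per the layout comment above */
#define DEMO_RX_L4_CSUM_ERR_MASK (1u << 14) /* assumed position */
#define DEMO_RX_LAST_MASK        (1u << 27) /* assumed position */

static bool demo_rx_csum_suspect(uint32_t status)
{
        if (!(status & DEMO_RX_LAST_MASK))
                return false; /* parsing info is undefined unless last = 1 */
        return (status & (DEMO_RX_L3_CSUM_ERR_MASK |
                          DEMO_RX_L4_CSUM_ERR_MASK)) != 0;
}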
-/* ENA IO Queue Rx Completion Descriptor (8-word format) */
+/* 8-word format */
struct ena_eth_io_rx_cdesc_ext {
- /* words 0:3 : Rx Completion Extended */
struct ena_eth_io_rx_cdesc_base base;
- /* word 4 : Completed Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 5 : */
- /* the buffer address used bits[47:32] */
uint16_t buff_addr_hi;
uint16_t reserved16;
- /* word 6 : Reserved */
uint32_t reserved_w6;
- /* word 7 : Reserved */
uint32_t reserved_w7;
};
-/* ENA Interrupt Unmask Register */
struct ena_eth_io_intr_reg {
- /* word 0 : */
- /* 14:0 : rx_intr_delay - rx interrupt delay value
- * 29:15 : tx_intr_delay - tx interrupt delay value
- * 30 : intr_unmask - if set, unmasks interrupt
+ /* 14:0 : rx_intr_delay
+ * 29:15 : tx_intr_delay
+ * 30 : intr_unmask
* 31 : reserved
*/
uint32_t intr_control;
};
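
The trimmed comment keeps the full layout of intr_control: rx delay in 14:0, tx delay in 29:15, unmask in bit 30. A small sketch composing the register value from those three inputs:

#include <stdint.h>

static uint32_t demo_intr_reg(uint32_t rx_delay, uint32_t tx_delay, int unmask)
{
        uint32_t v = rx_delay & 0x7FFFu;        /* 14:0  : rx_intr_delay */

        v |= (tx_delay & 0x7FFFu) << 15;        /* 29:15 : tx_intr_delay */
        if (unmask)
                v |= 1u << 30;                  /* 30    : intr_unmask   */
        return v;
}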
-/* ENA NUMA Node configuration register */
struct ena_eth_io_numa_node_cfg_reg {
- /* word 0 : */
/* 7:0 : numa
* 30:8 : reserved
* 31 : enabled
@@ -388,10 +343,8 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
-#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15
-#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK BIT(15)
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
@@ -463,803 +416,544 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
#if !defined(ENA_DEFS_LINUX_MAINLINE)
-static inline uint32_t get_ena_eth_io_tx_desc_length(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p)
{
return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
}
-static inline void set_ena_eth_io_tx_desc_length(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK)
- >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_req_id_hi(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT)
- & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK)
- >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_meta_desc(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT)
- & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_phase(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK)
- >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_phase(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT)
- & ENA_ETH_IO_TX_DESC_PHASE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_first(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK)
- >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_first(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_TX_DESC_FIRST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_last(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK)
- >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_last(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT)
- & ENA_ETH_IO_TX_DESC_LAST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_comp_req(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_comp_req(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p)
{
return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
}
-static inline void set_ena_eth_io_tx_desc_l3_proto_idx(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_DF(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK)
- >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_DF(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_DF_SHIFT)
- & ENA_ETH_IO_TX_DESC_DF_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_tso_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_tso_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_proto_idx(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l3_csum_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_csum_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK)
- >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT)
- & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_csum_partial(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK)
- >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_req_id_lo(
- struct ena_eth_io_tx_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT)
- & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p)
{
return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
}
-static inline void set_ena_eth_io_tx_desc_addr_hi(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_header_length(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p)
{
- return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK)
- >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
+ return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_header_length(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->buff_addr_hi_hdr_sz |=
- (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT)
- & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+ p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p)
{
return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(
- const struct ena_eth_io_tx_meta_desc *p)
-{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK)
- >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
-}
-
-static inline void set_ena_eth_io_tx_meta_desc_ext_valid(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
-{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
-}
-
-static inline uint32_t get_ena_eth_io_tx_meta_desc_word3_valid(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK)
- >> ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_word3_valid(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK)
- >> ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
}
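
With the PTP-era name gone, these four bits (19:16 of len_ctrl, GENMASK(19, 16) above) are simply the high part of the MSS, paired with the mss_lo field in word2. A sketch of splitting one MSS value across the two slots (the 10-bit width and 31:22 position of mss_lo are assumptions for illustration; the mss_hi slot comes from the macros above):

#include <stdint.h>

struct demo_tx_meta_desc { uint32_t len_ctrl; uint32_t word2; };

#define DEMO_MSS_LO_BITS 10 /* assumed width of the mss_lo field */

static void demo_set_mss(struct demo_tx_meta_desc *m, uint32_t mss)
{
        uint32_t lo = mss & ((1u << DEMO_MSS_LO_BITS) - 1);
        uint32_t hi = mss >> DEMO_MSS_LO_BITS;

        m->word2    |= lo << 22;                /* mss_lo slot (assumed 31:22) */
        m->len_ctrl |= (hi << 16) & 0xF0000u;   /* mss_hi, GENMASK(19, 16)     */
}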
-static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_meta_store(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK)
- >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_meta_desc(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_phase(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_first(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK)
- >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_first(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_last(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK)
- >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_last(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_comp_req(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p)
{
return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p)
{
return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK)
- >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK)
- >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK)
- >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_mss_lo(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
}
-static inline uint8_t get_ena_eth_io_tx_cdesc_phase(
- const struct ena_eth_io_tx_cdesc *p)
+static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p)
{
return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}
-static inline void set_ena_eth_io_tx_cdesc_phase(
- struct ena_eth_io_tx_cdesc *p,
- uint8_t val)
+static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val)
{
p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_phase(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p)
{
return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
}
-static inline void set_ena_eth_io_rx_desc_phase(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val)
{
p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_first(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK)
- >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_first(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_last(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK)
- >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_last(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT)
- & ENA_ETH_IO_RX_DESC_LAST_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_comp_req(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_comp_req(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(
- struct ena_eth_io_rx_cdesc_base *p,
- uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
- struct ena_eth_io_rx_cdesc_base *p,
- uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |=
- (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_phase(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_first(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_last(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_buffer(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p)
{
return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
}
-static inline void set_ena_eth_io_intr_reg_rx_intr_delay(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
{
p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p)
{
- return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK)
- >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
}
-static inline void set_ena_eth_io_intr_reg_tx_intr_delay(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
{
- p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
- & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p)
{
- return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK)
- >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
}
-static inline void set_ena_eth_io_intr_reg_intr_unmask(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val)
{
- p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT)
- & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
-static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(
- const struct ena_eth_io_numa_node_cfg_reg *p)
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct ena_eth_io_numa_node_cfg_reg *p)
{
return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
}
-static inline void set_ena_eth_io_numa_node_cfg_reg_numa(
- struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
{
p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
}
-static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(
- const struct ena_eth_io_numa_node_cfg_reg *p)
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p)
{
- return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK)
- >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
+ return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
}
-static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(
- struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
{
- p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT)
- & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+ p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
}
#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
diff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h
index 3d252096..e87bcfd8 100644
--- a/drivers/net/ena/base/ena_defs/ena_gen_info.h
+++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h
@@ -31,5 +31,5 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define ENA_GEN_DATE "Sun Jun 5 10:24:39 IDT 2016"
-#define ENA_GEN_COMMIT "17146ed"
+#define ENA_GEN_DATE "Sun Oct 23 12:27:32 IDT 2016"
+#define ENA_GEN_COMMIT "79d82fa"
diff --git a/drivers/net/ena/base/ena_defs/ena_includes.h b/drivers/net/ena/base/ena_defs/ena_includes.h
index a86c876f..30a920a8 100644
--- a/drivers/net/ena/base/ena_defs/ena_includes.h
+++ b/drivers/net/ena/base/ena_defs/ena_includes.h
@@ -35,5 +35,3 @@
#include "ena_regs_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
-#include "ena_efa_admin_defs.h"
-#include "ena_efa_io_defs.h"
diff --git a/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
index d0241278..b0870f25 100644
--- a/drivers/net/ena/base/ena_defs/ena_regs_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
@@ -34,6 +34,38 @@
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
+enum ena_regs_reset_reason_types {
+ ENA_REGS_RESET_NORMAL = 0,
+
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+
+ ENA_REGS_RESET_ADMIN_TO = 2,
+
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+
+ ENA_REGS_RESET_INIT_ERR = 7,
+
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+
+ ENA_REGS_RESET_SHUTDOWN = 11,
+
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+
+ ENA_REGS_RESET_GENERIC = 13,
+
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
+};
+
/* ena_registers offsets */
#define ENA_REGS_VERSION_OFF 0x0
#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
@@ -80,6 +112,8 @@
#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
@@ -104,6 +138,8 @@
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
#define ENA_REGS_DEV_STS_READY_MASK 0x1
diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c
index 290a5666..4c4989a3 100644
--- a/drivers/net/ena/base/ena_eth_com.c
+++ b/drivers/net/ena/base/ena_eth_com.c
@@ -43,11 +43,10 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
head_masked = io_cq->head & (io_cq->q_depth - 1);
expected_phase = io_cq->phase;
- cdesc = (struct ena_eth_io_rx_cdesc_base *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -74,7 +73,7 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
offset = tail_masked * io_sq->desc_entry_size;
- return (unsigned char *)io_sq->desc_addr.virt_addr + offset;
+ return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
@@ -86,8 +85,8 @@ static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return;
- memcpy_toio((unsigned char *)io_sq->desc_addr.pbuf_dev_addr + offset,
- (unsigned char *)io_sq->desc_addr.virt_addr + offset,
+ memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
+ io_sq->desc_addr.virt_addr + offset,
io_sq->desc_entry_size);
}
@@ -125,11 +124,11 @@ static inline struct ena_eth_io_rx_cdesc_base *
{
idx &= (io_cq->q_depth - 1);
return (struct ena_eth_io_rx_cdesc_base *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr +
- idx * io_cq->cdesc_entry_size_in_bytes);
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ idx * io_cq->cdesc_entry_size_in_bytes);
}
-static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
u16 *first_cdesc_idx)
{
struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -143,7 +142,7 @@ static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
@@ -183,9 +182,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
return false;
}
-static inline void ena_com_create_and_store_tx_meta_desc(
- struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -203,8 +201,8 @@ static inline void ena_com_create_and_store_tx_meta_desc(
ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
/* bits 10-13 of the mss */
meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
- ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) &
- ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
@@ -237,11 +235,11 @@ static inline void ena_com_create_and_store_tx_meta_desc(
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
- ena_rx_ctx->l3_proto = (enum ena_eth_io_l3_proto_index)(cdesc->status &
- ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK);
- ena_rx_ctx->l4_proto = (enum ena_eth_io_l4_proto_index)
- ((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
+ ena_rx_ctx->l3_proto = cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+ ena_rx_ctx->l4_proto =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
ena_rx_ctx->l3_csum_err =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
@@ -280,8 +278,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
bool have_meta;
u64 addr_hi;
- ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX,
- "wrong Q type");
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
+ "wrong Q type");
/* num_bufs +1 for potential meta desc */
if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
@@ -410,8 +408,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 nb_hw_desc;
u16 i;
- ENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
- "wrong Q type");
+ ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
if (nb_hw_desc == 0) {
@@ -455,8 +453,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
{
struct ena_eth_io_rx_desc *desc;
- ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
- "wrong Q type");
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
return ENA_COM_NO_SPACE;
@@ -475,8 +473,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
- ((ena_buf->paddr &
- GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+ ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
ena_com_sq_update_tail(io_sq);
@@ -493,20 +490,37 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
expected_phase = io_cq->phase;
cdesc = (struct ena_eth_io_tx_cdesc *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr
- + (masked_head * io_cq->cdesc_entry_size_in_bytes));
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
/* When the current completion descriptor phase isn't the same as the
	 * expected one, it means that the device hasn't updated
	 * this completion yet.
*/
- cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
if (cdesc_phase != expected_phase)
return ENA_COM_TRY_AGAIN;
+ if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+ ena_trc_err("Invalid req id %d\n", cdesc->req_id);
+ return ENA_COM_INVAL;
+ }
+
ena_com_cq_inc_head(io_cq);
- *req_id = cdesc->req_id;
+ *req_id = READ_ONCE(cdesc->req_id);
return 0;
}
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+	return cdesc == NULL;
+}
+
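
The new READ_ONCE() loads in this file all serve the same completion-ring protocol: the device flips a phase bit each time it wraps the ring, so a descriptor belongs to the current pass only when its phase matches the one the driver expects. A minimal sketch of that test, using only symbols from this file (illustrative, not part of the patch):

static inline bool ena_com_rx_cdesc_ready(struct ena_com_io_cq *io_cq,
					  struct ena_eth_io_rx_cdesc_base *cdesc)
{
	/* Fresh load: without READ_ONCE the compiler may cache the status
	 * word and spin on a stale value across poll iterations.
	 */
	u8 desc_phase = (READ_ONCE(cdesc->status) &
			 ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	return desc_phase == io_cq->phase;
}
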
diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h
index 71a880c0..56ea4ae6 100644
--- a/drivers/net/ena/base/ena_eth_com.h
+++ b/drivers/net/ena/base/ena_eth_com.h
@@ -92,10 +92,12 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
struct ena_eth_io_intr_reg *intr_reg)
{
- ENA_REG_WRITE32(intr_reg->intr_control, io_cq->unmask_reg);
+ ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
@@ -118,7 +120,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
- ENA_REG_WRITE32(tail, io_sq->db_addr);
+ ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);
return 0;
}
@@ -135,7 +137,7 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
if (io_cq->cq_head_db_reg && need_update) {
ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
io_cq->qid, head);
- ENA_REG_WRITE32(head, io_cq->cq_head_db_reg);
+ ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
io_cq->last_head_update = head;
}
@@ -153,7 +155,7 @@ static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
- ENA_REG_WRITE32(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
+ ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
diff --git a/drivers/net/ena/base/ena_plat.h b/drivers/net/ena/base/ena_plat.h
index b5b64545..f829936b 100644
--- a/drivers/net/ena/base/ena_plat.h
+++ b/drivers/net/ena/base/ena_plat.h
@@ -43,7 +43,11 @@
#include "ena_plat_dpdk.h"
#endif
#elif defined(__FreeBSD__)
+#if defined(_KERNEL)
+#include "ena_plat_fbsd.h"
+#else
#include "ena_plat_dpdk.h"
+#endif
#elif defined(_WIN32)
#include "ena_plat_windows.h"
#else
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 8cba319e..900ba1a6 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -73,10 +73,10 @@ typedef uint64_t dma_addr_t;
#define ENA_COM_INVAL -EINVAL
#define ENA_COM_NO_SPACE -ENOSPC
#define ENA_COM_NO_DEVICE -ENODEV
-#define ENA_COM_PERMISSION -EPERM
#define ENA_COM_TIMER_EXPIRED -ETIME
#define ENA_COM_FAULT -EFAULT
#define ENA_COM_TRY_AGAIN -EAGAIN
+#define ENA_COM_UNSUPPORTED -EOPNOTSUPP
#define ____cacheline_aligned __rte_cache_aligned
@@ -116,11 +116,13 @@ typedef uint64_t dma_addr_t;
#define ENA_MIN16(x, y) RTE_MIN((x), (y))
#define ENA_MIN8(x, y) RTE_MIN((x), (y))
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
#define U64_C(x) x ## ULL
#define BIT(nr) (1UL << (nr))
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
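
The rewritten GENMASK_ULL() matters at the boundary: the old form computed (1ULL << 64) for GENMASK_ULL(63, 0), which is undefined behavior in C, while the new form stays defined over the full 0..63 range. A quick illustrative self-check (not part of the patch):

#include <assert.h>

static void genmask_ull_selftest(void)
{
	assert(GENMASK_ULL(63, 0) == ~0ULL);		/* old form was UB here */
	assert(GENMASK_ULL(31, 0) == 0xFFFFFFFFULL);
	assert(GENMASK_ULL(15, 8) == 0xFF00ULL);
}
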
#ifdef RTE_LIBRTE_ENA_COM_DEBUG
#define ena_trc_dbg(format, arg...) \
@@ -138,6 +140,15 @@ typedef uint64_t dma_addr_t;
#define ena_trc_err(format, arg...) do { } while (0)
#endif /* RTE_LIBRTE_ENA_COM_DEBUG */
+#define ENA_WARN(cond, format, arg...) \
+do { \
+ if (unlikely(cond)) { \
+ ena_trc_err( \
+ "Warn failed on %s:%s:%d:" format, \
+ __FILE__, __func__, __LINE__, ##arg); \
+ } \
+} while (0)
+
/* Spinlock related methods */
#define ena_spinlock_t rte_spinlock_t
#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock)
@@ -177,10 +188,21 @@ typedef uint64_t dma_addr_t;
#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond)
/* pthread condition doesn't need to be rearmed after usage */
#define ENA_WAIT_EVENT_CLEAR(...)
+#define ENA_WAIT_EVENT_DESTROY(waitqueue) ((void)(waitqueue))
#define ena_wait_event_t ena_wait_queue_t
#define ENA_MIGHT_SLEEP()
+#define ENA_TIME_EXPIRE(timeout)  ((timeout) < rte_get_timer_cycles())
+#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
+	((timeout_us) * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())
+
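
The two timeout helpers are meant to compose: convert a microsecond budget into an absolute TSC deadline once, then test that deadline inside the poll loop. An illustrative pattern (the register, mask, and delay are placeholders, not part of the patch; rte_delay_us() comes from rte_cycles.h):

static int ena_poll_reg_bit(volatile uint32_t *reg, uint32_t mask)
{
	/* one-second budget, converted once to an absolute deadline */
	uint64_t deadline = ENA_GET_SYSTEM_TIMEOUT(1000000);

	while (!(*reg & mask)) {
		if (ENA_TIME_EXPIRE(deadline))
			return ENA_COM_TIMER_EXPIRED;
		rte_delay_us(100);
	}

	return 0;
}
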
+/*
+ * Each rte_memzone must have a unique name.
+ * To satisfy this, count the allocations and append the counter to the name.
+ */
+extern uint32_t ena_alloc_cnt;
+
#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \
do { \
const struct rte_memzone *mz; \
@@ -188,47 +210,57 @@ typedef uint64_t dma_addr_t;
ENA_TOUCH(dmadev); ENA_TOUCH(handle); \
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->iova; \
+ mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, \
+ RTE_MEMZONE_IOVA_CONTIG); \
handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
} while (0)
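
With the added NULL handling, a caller can now detect a failed memzone reservation (duplicate name, exhausted hugepages, or no IOVA-contiguous run) instead of crashing on mz->addr. A hedged wrapper showing the intended check (illustrative; plain -ENOMEM is used to keep it self-contained):

static int ena_alloc_coherent_checked(void *dmadev, size_t size,
				      void **virt, dma_addr_t *phys,
				      const struct rte_memzone **handle)
{
	ENA_MEM_ALLOC_COHERENT(dmadev, size, *virt, *phys, *handle);

	return (*virt == NULL) ? -ENOMEM : 0;
}
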
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
({ ENA_TOUCH(size); ENA_TOUCH(phys); \
ENA_TOUCH(dmadev); \
rte_memzone_free(handle); })
-#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, node, dev_node) \
+#define ENA_MEM_ALLOC_COHERENT_NODE( \
+ dmadev, size, virt, phys, mem_handle, node, dev_node) \
do { \
const struct rte_memzone *mz; \
char z_name[RTE_MEMZONE_NAMESIZE]; \
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->iova; \
+ mz = rte_memzone_reserve(z_name, size, node, \
+ RTE_MEMZONE_IOVA_CONTIG); \
+ mem_handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
} while (0)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
do { \
- const struct rte_memzone *mz; \
- char z_name[RTE_MEMZONE_NAMESIZE]; \
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
- snprintf(z_name, sizeof(z_name), \
- "ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
+ virt = rte_zmalloc_socket(NULL, size, 0, node); \
} while (0)
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })
-#define ENA_REG_WRITE32(value, reg) rte_write32_relaxed((value), (reg))
-#define ENA_REG_READ32(reg) rte_read32_relaxed((reg))
+#define ENA_REG_WRITE32(bus, value, reg) \
+ ({ (void)(bus); rte_write32_relaxed((value), (reg)); })
+#define ENA_REG_READ32(bus, reg) \
+ ({ (void)(bus); rte_read32_relaxed((reg)); })
#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr)
@@ -244,4 +276,11 @@ typedef uint64_t dma_addr_t;
#define PTR_ERR(error) ((long)(void *)error)
#define might_sleep()
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+#ifndef READ_ONCE
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
+#endif
+
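
Note that upper_32_bits() deliberately shifts twice by 16: a single >> 32 would be undefined if the macro were ever handed a 32-bit value. Illustrative use with the dma_addr_t defined in this file (not part of the patch):

static void ena_split_dma_addr(dma_addr_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = lower_32_bits(addr);
	*hi = upper_32_bits(addr);	/* defined even for 32-bit inputs */
}
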
#endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 34b2a8d7..c255dc6d 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -54,7 +54,7 @@
#include <ena_eth_io_defs.h>
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_MINOR 1
#define DRV_MODULE_VER_SUBMINOR 0
#define ENA_IO_TXQ_IDX(q) (2 * (q))
@@ -85,6 +85,9 @@
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ENA_MAX_RING_DESC ENA_DEFAULT_RING_SIZE
+#define ENA_MIN_RING_DESC 128
+
enum ethtool_stringset {
ETH_SS_TEST = 0,
ETH_SS_STATS,
@@ -114,6 +117,12 @@ struct ena_stats {
#define ENA_STAT_GLOBAL_ENTRY(stat) \
ENA_STAT_ENTRY(stat, dev)
+/*
+ * Each rte_memzone must have a unique name.
+ * To satisfy this, count the allocations and append the counter to the name.
+ */
+uint32_t ena_alloc_cnt;
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(io_suspend),
@@ -195,8 +204,11 @@ static const struct rte_pci_id pci_id_ena_map[] = {
{ .device_id = 0 },
};
+static struct ena_aenq_handlers aenq_handlers;
+
static int ena_device_init(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx);
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -215,7 +227,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
+static void ena_stop(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
+static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
@@ -238,10 +252,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads);
+static void ena_interrupt_handler_rte(void *cb_arg);
+static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
@@ -249,12 +261,14 @@ static const struct eth_dev_ops ena_dev_ops = {
.rx_queue_setup = ena_rx_queue_setup,
.tx_queue_setup = ena_tx_queue_setup,
.dev_start = ena_start,
+ .dev_stop = ena_stop,
.link_update = ena_link_update,
.stats_get = ena_stats_get,
.mtu_set = ena_mtu_set,
.rx_queue_release = ena_rx_queue_release,
.tx_queue_release = ena_tx_queue_release,
.dev_close = ena_close,
+ .dev_reset = ena_dev_reset,
.reta_update = ena_rss_reta_update,
.reta_query = ena_rss_reta_query,
};
@@ -264,11 +278,15 @@ static const struct eth_dev_ops ena_dev_ops = {
static inline int ena_cpu_to_node(int cpu)
{
struct rte_config *config = rte_eal_get_configuration();
+ struct rte_fbarray *arr = &config->mem_config->memzones;
+ const struct rte_memzone *mz;
+
+ if (unlikely(cpu >= RTE_MAX_MEMZONE))
+ return NUMA_NO_NODE;
- if (likely(cpu < RTE_MAX_MEMZONE))
- return config->mem_config->memzone[cpu].socket_id;
+ mz = rte_fbarray_get(arr, cpu);
- return NUMA_NO_NODE;
+ return mz->socket_id;
}
static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
@@ -346,9 +364,6 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
ena_meta->mss = mbuf->tso_segsz;
ena_meta->l3_hdr_len = mbuf->l3_len;
ena_meta->l3_hdr_offset = mbuf->l2_len;
- /* this param needed only for TSO */
- ena_meta->l3_outer_hdr_len = 0;
- ena_meta->l3_outer_hdr_offset = 0;
ena_tx_ctx->meta_valid = true;
} else {
@@ -356,6 +371,40 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
}
}
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
+{
+ if (likely(req_id < rx_ring->ring_size))
+ return 0;
+
+ RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);
+
+ rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ rx_ring->adapter->trigger_reset = true;
+
+ return -EFAULT;
+}
+
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
+
+ if (likely(req_id < tx_ring->ring_size)) {
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->mbuf))
+ return 0;
+ }
+
+ if (tx_info)
+ RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
+ else
+ RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
+
+ /* Trigger device reset */
+ tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+ tx_ring->adapter->trigger_reset = true;
+ return -EFAULT;
+}
+
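
A sketch of how these validators slot into the Tx completion path (the loop shape is illustrative, not the PMD's actual cleanup code): a bad req_id aborts cleanup and arms the watchdog reset instead of indexing tx_buffer_info out of bounds.

static void ena_tx_cleanup_sketch(struct ena_ring *tx_ring)
{
	uint16_t req_id;

	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) == 0) {
		if (validate_tx_req_id(tx_ring, req_id) != 0)
			break;	/* reset_reason and trigger_reset already set */

		rte_pktmbuf_free(tx_ring->tx_buffer_info[req_id].mbuf);
		tx_ring->tx_buffer_info[req_id].mbuf = NULL;
	}
}
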
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
struct ena_admin_host_info *host_info;
@@ -387,9 +436,12 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
- RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
- if (rc != -EPERM)
- goto err;
+		if (rc == ENA_COM_UNSUPPORTED)
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ else
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+ goto err;
}
return;
@@ -440,9 +492,12 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(&adapter->ena_dev);
if (rc) {
- RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
- if (rc != -EPERM)
- goto err;
+		if (rc == ENA_COM_UNSUPPORTED)
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ else
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+ goto err;
}
return;
@@ -455,12 +510,76 @@ static void ena_close(struct rte_eth_dev *dev)
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
- adapter->state = ENA_ADAPTER_STATE_STOPPED;
+ ena_stop(dev);
+ adapter->state = ENA_ADAPTER_STATE_CLOSED;
ena_rx_queue_release_all(dev);
ena_tx_queue_release_all(dev);
}
+static int
+ena_dev_reset(struct rte_eth_dev *dev)
+{
+ struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
+ struct rte_eth_dev *eth_dev;
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct ena_com_dev *ena_dev;
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ struct ena_adapter *adapter;
+ int nb_queues;
+ int rc, i;
+ bool wd_state;
+
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+ ena_dev = &adapter->ena_dev;
+ eth_dev = adapter->rte_dev;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+ nb_queues = eth_dev->data->nb_rx_queues;
+
+ ena_com_set_admin_running_state(ena_dev, false);
+
+ rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+ if (rc)
+ RTE_LOG(ERR, PMD, "Device reset failed\n");
+
+ for (i = 0; i < nb_queues; i++)
+ mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
+
+ ena_rx_queue_release_all(eth_dev);
+ ena_tx_queue_release_all(eth_dev);
+
+ rte_intr_disable(intr_handle);
+
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
+ return rc;
+ }
+ adapter->wd_state = wd_state;
+
+ rte_intr_enable(intr_handle);
+ ena_com_set_admin_polling_mode(ena_dev, false);
+ ena_com_admin_aenq_enable(ena_dev);
+
+ for (i = 0; i < nb_queues; ++i)
+ ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL,
+ mb_pool_rx[i]);
+
+ for (i = 0; i < nb_queues; ++i)
+ ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL);
+
+ adapter->trigger_reset = false;
+
+ return 0;
+}
+
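
On the application side, the new dev_reset entry point pairs with the RTE_ETH_EVENT_INTR_RESET event raised by the watchdog further down. A hedged sketch of the consumer (the callback name is illustrative; production applications often defer the actual reset to their main loop rather than performing it in the interrupt thread):

static int ena_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
			      void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	/* ends up in ena_dev_reset() above via the dev_reset dev_op */
	return rte_eth_dev_reset(port_id);
}

/* registration, e.g. after rte_eth_dev_configure():
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *				      ena_reset_event_cb, NULL);
 */
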
static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
@@ -468,7 +587,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
- int ret, i;
+ int rc, i;
u16 entry_value;
int conf_idx;
int idx;
@@ -480,8 +599,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
RTE_LOG(WARNING, PMD,
"indirection table %d is bigger than supported (%d)\n",
reta_size, ENA_RX_RSS_TABLE_SIZE);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
for (i = 0 ; i < reta_size ; i++) {
@@ -493,29 +611,28 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
entry_value =
ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
- ret = ena_com_indirect_table_fill_entry(ena_dev,
- i,
- entry_value);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ i,
+ entry_value);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD,
"Cannot fill indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return rc;
}
}
}
- ret = ena_com_indirect_table_set(ena_dev);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return rc;
}
RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
__func__, reta_size, adapter->rte_dev->data->port_id);
-err:
- return ret;
+
+ return 0;
}
/* Query redirection table. */
@@ -526,7 +643,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
- int ret;
+ int rc;
int i;
u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
int reta_conf_idx;
@@ -536,11 +653,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
(reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
return -EINVAL;
- ret = ena_com_indirect_table_get(ena_dev, indirect_table);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ rc = ena_com_indirect_table_get(ena_dev, indirect_table);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD, "cannot get indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return -ENOTSUP;
}
for (i = 0 ; i < reta_size ; i++) {
@@ -550,8 +666,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
reta_conf[reta_conf_idx].reta[reta_idx] =
ENA_IO_RXQ_IDX_REV(indirect_table[i]);
}
-err:
- return ret;
+
+ return 0;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
@@ -571,7 +687,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = i % nb_rx_queues;
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -579,19 +695,19 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
goto err_fill_indir;
}
rc = ena_com_set_default_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
goto err_fill_indir;
}
rc = ena_com_indirect_table_set(ena_dev);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
goto err_fill_indir;
}
@@ -650,6 +766,10 @@ static void ena_rx_queue_release(void *queue)
rte_free(ring->rx_buffer_info);
ring->rx_buffer_info = NULL;
+ if (ring->empty_rx_reqs)
+ rte_free(ring->empty_rx_reqs);
+ ring->empty_rx_reqs = NULL;
+
ring->configured = 0;
RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
@@ -723,9 +843,12 @@ static int ena_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct rte_eth_link *link = &dev->data->dev_link;
+ struct ena_adapter *adapter;
+
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
- link->link_status = 1;
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link->link_speed = ETH_SPEED_NUM_NONE;
link->link_duplex = ETH_LINK_FULL_DUPLEX;
return 0;
@@ -737,13 +860,18 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_ring *queues = NULL;
+ int nb_queues;
int i = 0;
int rc = 0;
- queues = (ring_type == ENA_RING_TYPE_RX) ?
- adapter->rx_ring : adapter->tx_ring;
-
- for (i = 0; i < adapter->num_queues; i++) {
+ if (ring_type == ENA_RING_TYPE_RX) {
+ queues = adapter->rx_ring;
+ nb_queues = dev->data->nb_rx_queues;
+ } else {
+ queues = adapter->tx_ring;
+ nb_queues = dev->data->nb_tx_queues;
+ }
+ for (i = 0; i < nb_queues; i++) {
if (queues[i].configured) {
if (ring_type == ENA_RING_TYPE_RX) {
ena_assert_msg(
@@ -761,7 +889,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR,
"failed to restart queue %d type(%d)",
i, ring_type);
- return -1;
+ return rc;
}
}
}
@@ -785,9 +913,11 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
{
uint32_t max_frame_len = ena_get_mtu_conf(adapter);
- if (max_frame_len > adapter->max_mtu) {
- PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
- return -1;
+ if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+ PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
+ "max mtu: %d, min mtu: %d\n",
+ max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+ return ENA_COM_UNSUPPORTED;
}
return 0;
@@ -795,6 +925,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
static int
ena_calc_queue_size(struct ena_com_dev *ena_dev,
+ u16 *max_tx_sgl_size,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
@@ -812,11 +943,14 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
if (!rte_is_power_of_2(queue_size))
queue_size = rte_align32pow2(queue_size >> 1);
- if (queue_size == 0) {
+ if (unlikely(queue_size == 0)) {
PMD_INIT_LOG(ERR, "Invalid queue size");
return -EFAULT;
}
+ *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+ get_feat_ctx->max_queues.max_packet_tx_descs);
+
return queue_size;
}
@@ -881,12 +1015,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- if (mtu > ena_get_mtu_conf(adapter)) {
+ if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
RTE_LOG(ERR, PMD,
- "Given MTU (%d) exceeds maximum MTU supported (%d)\n",
- mtu, ena_get_mtu_conf(adapter));
- rc = -EINVAL;
- goto err;
+ "Invalid MTU setting. new_mtu: %d "
+ "max mtu: %d min mtu: %d\n",
+ mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+ return -EINVAL;
}
rc = ena_com_set_dev_mtu(ena_dev, mtu);
@@ -895,7 +1029,6 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
else
RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);
-err:
return rc;
}
@@ -903,14 +1036,9 @@ static int ena_start(struct rte_eth_dev *dev)
{
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
+ uint64_t ticks;
int rc = 0;
- if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG ||
- adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "API violation");
- return -1;
- }
-
rc = ena_check_valid_conf(adapter);
if (rc)
return rc;
@@ -924,7 +1052,7 @@ static int ena_start(struct rte_eth_dev *dev)
return rc;
if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_RSS_FLAG) {
+ ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
rc = ena_rss_init_default(adapter);
if (rc)
return rc;
@@ -932,11 +1060,28 @@ static int ena_start(struct rte_eth_dev *dev)
ena_stats_restart(dev);
+ adapter->timestamp_wd = rte_get_timer_cycles();
+ adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
+
+ ticks = rte_get_timer_hz();
+ rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
+ ena_timer_wd_callback, adapter);
+
adapter->state = ENA_ADAPTER_STATE_RUNNING;
return 0;
}
+static void ena_stop(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ rte_timer_stop_sync(&adapter->timer_wd);
+
+ adapter->state = ENA_ADAPTER_STATE_STOPPED;
+}
+
static int ena_queue_restart(struct ena_ring *ring)
{
int rc, bufs_num;
@@ -954,7 +1099,7 @@ static int ena_queue_restart(struct ena_ring *ring)
rc = ena_populate_rx_queue(ring, bufs_num);
if (rc != bufs_num) {
		PMD_INIT_LOG(ERR, "Failed to populate rx ring!");
- return (-1);
+ return ENA_COM_FAULT;
}
return 0;
@@ -984,12 +1129,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(CRIT, PMD,
"API violation. Queue %d is already configured\n",
queue_idx);
- return -1;
+ return ENA_COM_FAULT;
}
if (!rte_is_power_of_2(nb_desc)) {
RTE_LOG(ERR, PMD,
- "Unsupported size of RX queue: %d is not a power of 2.",
+ "Unsupported size of TX queue: %d is not a power of 2.",
nb_desc);
return -EINVAL;
}
@@ -1001,12 +1146,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
- !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
- RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
- return -EINVAL;
- }
-
ena_qid = ENA_IO_TXQ_IDX(queue_idx);
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1021,6 +1160,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(ERR, PMD,
"failed to create io TX queue #%d (qid:%d) rc: %d\n",
queue_idx, ena_qid, rc);
+ return rc;
}
txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1032,10 +1172,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(ERR, PMD,
"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
queue_idx, rc);
- ena_com_destroy_io_queue(ena_dev, ena_qid);
- goto err;
+ goto err_destroy_io_queue;
}
+ ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node);
+
txq->port_id = dev->data->port_id;
txq->next_to_clean = 0;
txq->next_to_use = 0;
@@ -1047,7 +1188,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!txq->tx_buffer_info) {
RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_destroy_io_queue;
}
txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
@@ -1055,18 +1197,29 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!txq->empty_tx_reqs) {
RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
- rte_free(txq->tx_buffer_info);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free;
}
+
for (i = 0; i < txq->ring_size; i++)
txq->empty_tx_reqs[i] = i;
- txq->offloads = tx_conf->offloads;
+ if (tx_conf != NULL) {
+ txq->offloads =
+ tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ }
/* Store pointer to this queue in upper layer */
txq->configured = 1;
dev->data->tx_queues[queue_idx] = txq;
-err:
+
+ return 0;
+
+err_free:
+ rte_free(txq->tx_buffer_info);
+
+err_destroy_io_queue:
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
return rc;
}
@@ -1074,7 +1227,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
__rte_unused unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct ena_com_create_io_ctx ctx =
@@ -1085,7 +1238,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
(struct ena_adapter *)(dev->data->dev_private);
struct ena_ring *rxq = NULL;
uint16_t ena_qid = 0;
- int rc = 0;
+ int i, rc = 0;
struct ena_com_dev *ena_dev = &adapter->ena_dev;
rxq = &adapter->rx_ring[queue_idx];
@@ -1093,12 +1246,12 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(CRIT, PMD,
"API violation. Queue %d is already configured\n",
queue_idx);
- return -1;
+ return ENA_COM_FAULT;
}
if (!rte_is_power_of_2(nb_desc)) {
RTE_LOG(ERR, PMD,
- "Unsupported size of TX queue: %d is not a power of 2.",
+ "Unsupported size of RX queue: %d is not a power of 2.",
nb_desc);
return -EINVAL;
}
@@ -1110,11 +1263,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
- RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
- return -EINVAL;
- }
-
ena_qid = ENA_IO_RXQ_IDX(queue_idx);
ctx.qid = ena_qid;
@@ -1125,9 +1273,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
ctx.numa_node = ena_cpu_to_node(queue_idx);
rc = ena_com_create_io_queue(ena_dev, &ctx);
- if (rc)
+ if (rc) {
RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
queue_idx, rc);
+ return rc;
+ }
rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1140,6 +1290,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
queue_idx, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
rxq->port_id = dev->data->port_id;
@@ -1153,9 +1304,24 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!rxq->rx_buffer_info) {
RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
return -ENOMEM;
}
+ rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
+ sizeof(uint16_t) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->empty_rx_reqs) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
+ rte_free(rxq->rx_buffer_info);
+ rxq->rx_buffer_info = NULL;
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nb_desc; i++)
+		rxq->empty_rx_reqs[i] = i;
+
/* Store pointer to this queue in upper layer */
rxq->configured = 1;
dev->data->rx_queues[queue_idx] = rxq;
@@ -1170,7 +1336,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
uint16_t ring_size = rxq->ring_size;
uint16_t ring_mask = ring_size - 1;
uint16_t next_to_use = rxq->next_to_use;
- uint16_t in_use;
+ uint16_t in_use, req_id;
struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
if (unlikely(!count))
@@ -1198,12 +1364,18 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
struct ena_com_buf ebuf;
rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+
+ req_id = rxq->empty_rx_reqs[next_to_use_masked];
+ rc = validate_rx_req_id(rxq, req_id);
+ if (unlikely(rc < 0))
+ break;
+
/* prepare physical address for DMA transaction */
ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* pass resource to device */
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
- &ebuf, next_to_use_masked);
+ &ebuf, req_id);
if (unlikely(rc)) {
rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
count - i);
@@ -1213,9 +1385,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
next_to_use++;
}
+ if (unlikely(i < count))
+ RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
+ "buffers (from %d)\n", rxq->id, i, count);
+
	/* When we submitted free resources to device... */
- if (i > 0) {
- /* ...let HW know that it can fill buffers with data */
+ if (likely(i > 0)) {
+ /* ...let HW know that it can fill buffers with data
+ *
+		 * Add a memory barrier to make sure the descriptors were
+		 * written before issuing the doorbell.
+ */
rte_wmb();
ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
@@ -1226,8 +1406,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
}
static int ena_device_init(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state)
{
+ uint32_t aenq_groups;
int rc;
bool readless_supported;
@@ -1247,7 +1429,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
ena_com_set_mmio_read_mode(ena_dev, readless_supported);
/* reset device */
- rc = ena_com_dev_reset(ena_dev);
+ rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) {
RTE_LOG(ERR, PMD, "cannot reset device\n");
goto err_mmio_read_less;
@@ -1263,7 +1445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
/* ENA device administration layer init */
- rc = ena_com_admin_init(ena_dev, NULL, true);
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
if (rc) {
RTE_LOG(ERR, PMD,
"cannot initialize ena admin queue with device\n");
@@ -1286,6 +1468,21 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
goto err_admin_init;
}
+ aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
+ BIT(ENA_ADMIN_NOTIFICATION) |
+ BIT(ENA_ADMIN_KEEP_ALIVE) |
+ BIT(ENA_ADMIN_FATAL_ERROR) |
+ BIT(ENA_ADMIN_WARNING);
+
+ aenq_groups &= get_feat_ctx->aenq.supported_groups;
+ rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc);
+ goto err_admin_init;
+ }
+
+ *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
return 0;
err_admin_init:
@@ -1297,16 +1494,89 @@ err_mmio_read_less:
return rc;
}
+static void ena_interrupt_handler_rte(void *cb_arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)cb_arg;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ ena_com_admin_q_comp_intr_handler(ena_dev);
+ if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
+ ena_com_aenq_intr_handler(ena_dev, adapter);
+}
+
+static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+{
+ if (!adapter->wd_state)
+ return;
+
+ if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+ if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
+ adapter->keep_alive_timeout)) {
+ RTE_LOG(ERR, PMD, "Keep alive timeout\n");
+ adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
+ adapter->trigger_reset = true;
+ }
+}
+
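
check_for_missing_keep_alive() only makes sense if something refreshes timestamp_wd. That is the job of the keep-alive AENQ handler registered through the aenq_handlers table; a hedged sketch of its shape (names are illustrative, not part of the patch):

static void ena_keep_alive_cb(void *adapter_data,
			      __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	/* heartbeat received: push the watchdog deadline forward */
	adapter->timestamp_wd = rte_get_timer_cycles();
}
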
+/* Check if admin queue is enabled */
+static void check_for_admin_com_state(struct ena_adapter *adapter)
+{
+ if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
+ RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n");
+ adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
+ adapter->trigger_reset = true;
+ }
+}
+
+static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
+ void *arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)arg;
+ struct rte_eth_dev *dev = adapter->rte_dev;
+
+ check_for_missing_keep_alive(adapter);
+ check_for_admin_com_state(adapter);
+
+ if (unlikely(adapter->trigger_reset)) {
+ RTE_LOG(ERR, PMD, "Trigger reset is on\n");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
+}
+
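
Note that rte_timer callbacks such as ena_timer_wd_callback() only fire when an lcore services the timer list, so the application must call rte_timer_manage() periodically. A minimal sketch of that requirement (the loop body is illustrative):

static void app_poll_loop(void)
{
	uint64_t prev_tsc = 0;

	for (;;) {
		uint64_t cur_tsc = rte_get_timer_cycles();

		/* service expired timers roughly once per millisecond */
		if (cur_tsc - prev_tsc > rte_get_timer_hz() / 1000) {
			rte_timer_manage();
			prev_tsc = cur_tsc;
		}

		/* rx/tx burst processing would go here */
	}
}
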
+static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ int io_sq_num, io_cq_num, io_queue_num;
+
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ io_cq_num = get_feat_ctx->max_queues.max_cq_num;
+
+ io_queue_num = RTE_MIN(io_sq_num, io_cq_num);
+
+ if (unlikely(io_queue_num == 0)) {
+ RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
+ return -EFAULT;
+ }
+
+ return io_queue_num;
+}
+
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct ena_adapter *adapter =
(struct ena_adapter *)(eth_dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
struct ena_com_dev_get_features_ctx get_feat_ctx;
int queue_size, rc;
+ u16 tx_sgl_size = 0;
static int adapters_found;
+ bool wd_state;
memset(adapter, 0, sizeof(struct ena_adapter));
ena_dev = &adapter->ena_dev;
@@ -1330,19 +1600,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
pci_dev->addr.devid,
pci_dev->addr.function);
+ intr_handle = &pci_dev->intr_handle;
+
adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
- /* Present ENA_MEM_BAR indicates available LLQ mode.
- * Use corresponding policy
- */
- if (adapter->dev_mem_base)
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
- else if (adapter->regs)
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
- else
+ if (!adapter->regs) {
PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
ENA_REGS_BAR);
+ return -ENXIO;
+ }
ena_dev->reg_bar = adapter->regs;
ena_dev->dmadev = adapter->pdev;
@@ -1353,36 +1620,28 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
adapter->id_number);
/* device specific initialization routine */
- rc = ena_device_init(ena_dev, &get_feat_ctx);
+ rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
if (rc) {
PMD_INIT_LOG(CRIT, "Failed to init ENA device");
- return -1;
- }
-
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- if (get_feat_ctx.max_queues.max_llq_num == 0) {
- PMD_INIT_LOG(ERR,
- "Trying to use LLQ but llq_num is 0.\n"
- "Fall back into regular queues.");
- ena_dev->tx_mem_queue_type =
- ENA_ADMIN_PLACEMENT_POLICY_HOST;
- adapter->num_queues =
- get_feat_ctx.max_queues.max_sq_num;
- } else {
- adapter->num_queues =
- get_feat_ctx.max_queues.max_llq_num;
- }
- } else {
- adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
+ goto err;
}
+ adapter->wd_state = wd_state;
- queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
- if ((queue_size <= 0) || (adapter->num_queues <= 0))
- return -EFAULT;
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ adapter->num_queues = ena_calc_io_queue_num(ena_dev,
+ &get_feat_ctx);
+
+ queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
+ if (queue_size <= 0 || adapter->num_queues <= 0) {
+ rc = -EFAULT;
+ goto err_device_destroy;
+ }
adapter->tx_ring_size = queue_size;
adapter->rx_ring_size = queue_size;
+ adapter->max_tx_sgl_size = tx_sgl_size;
+
/* prepare ring structures */
ena_init_rings(adapter);
@@ -1405,58 +1664,77 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
RTE_CACHE_LINE_SIZE);
if (!adapter->drv_stats) {
RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_delete_debug_area;
}
+ rte_intr_callback_register(intr_handle,
+ ena_interrupt_handler_rte,
+ adapter);
+ rte_intr_enable(intr_handle);
+ ena_com_set_admin_polling_mode(ena_dev, false);
+ ena_com_admin_aenq_enable(ena_dev);
+
+ if (adapters_found == 0)
+ rte_timer_subsystem_init();
+ rte_timer_init(&adapter->timer_wd);
+
adapters_found++;
adapter->state = ENA_ADAPTER_STATE_INIT;
return 0;
+
+err_delete_debug_area:
+ ena_com_delete_debug_area(ena_dev);
+
+err_device_destroy:
+ ena_com_delete_host_info(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+
+err:
+ return rc;
}
-static int ena_dev_configure(struct rte_eth_dev *dev)
+static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ena_adapter *adapter =
- (struct ena_adapter *)(dev->data->dev_private);
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ (struct ena_adapter *)(eth_dev->data->dev_private);
- if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
- RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- tx_offloads, adapter->tx_supported_offloads);
- return -ENOTSUP;
- }
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
- if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- rx_offloads, adapter->rx_supported_offloads);
- return -ENOTSUP;
- }
+ if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
+ ena_close(eth_dev);
- if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
- adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
- adapter->state);
- return -1;
- }
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
- switch (adapter->state) {
- case ENA_ADAPTER_STATE_INIT:
- case ENA_ADAPTER_STATE_STOPPED:
- adapter->state = ENA_ADAPTER_STATE_CONFIG;
- break;
- case ENA_ADAPTER_STATE_CONFIG:
- RTE_LOG(WARNING, PMD,
- "Ivalid driver state while trying to configure device\n");
- break;
- default:
- break;
- }
+ rte_free(adapter->drv_stats);
+ adapter->drv_stats = NULL;
+
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ena_interrupt_handler_rte,
+ adapter);
+
+ adapter->state = ENA_ADAPTER_STATE_FREE;
+
+ return 0;
+}
- adapter->tx_selected_offloads = tx_offloads;
- adapter->rx_selected_offloads = rx_offloads;
+static int ena_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ adapter->state = ENA_ADAPTER_STATE_CONFIG;
+
+ adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+ adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
return 0;
}
@@ -1473,6 +1751,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
ring->id = i;
ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
+ ring->sgl_size = adapter->max_tx_sgl_size;
}
for (i = 0; i < adapter->num_queues; i++) {
@@ -1485,32 +1764,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
}
}
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads)
-{
- uint64_t port_offloads = adapter->tx_selected_offloads;
-
- /* Check if port supports all requested offloads.
- * True if all offloads selected for queue are set for port.
- */
- if ((offloads & port_offloads) != offloads)
- return false;
- return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads)
-{
- uint64_t port_offloads = adapter->rx_selected_offloads;
-
- /* Check if port supports all requested offloads.
- * True if all offloads selected for queue are set for port.
- */
- if ((offloads & port_offloads) != offloads)
- return false;
- return true;
-}
-
static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
@@ -1527,8 +1780,6 @@ static void ena_infos_get(struct rte_eth_dev *dev,
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
@@ -1581,6 +1832,16 @@ static void ena_infos_get(struct rte_eth_dev *dev,
adapter->tx_supported_offloads = tx_feat;
adapter->rx_supported_offloads = rx_feat;
+
+ dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+ dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+
+ dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+ dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+ dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+ feat.max_queues.max_packet_tx_descs);
+ dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+ feat.max_queues.max_packet_tx_descs);
}
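
With the rx/tx descriptor limits exported above, an application can clamp its
requested ring sizes before calling rte_eth_rx_queue_setup(). A minimal sketch
(the 1024 starting point and the helper name are illustrative, not part of
this patch):

    #include <rte_ethdev.h>

    /* Clamp a desired Rx ring size to what the port reports it supports. */
    static uint16_t pick_nb_rxd(uint16_t port_id)
    {
        struct rte_eth_dev_info info;

        rte_eth_dev_info_get(port_id, &info);
        return RTE_MAX(info.rx_desc_lim.nb_min,
                       RTE_MIN((uint16_t)1024, info.rx_desc_lim.nb_max));
    }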
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1591,6 +1852,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
unsigned int ring_mask = ring_size - 1;
uint16_t next_to_clean = rx_ring->next_to_clean;
uint16_t desc_in_use = 0;
+ uint16_t req_id;
unsigned int recv_idx = 0;
struct rte_mbuf *mbuf = NULL;
struct rte_mbuf *mbuf_head = NULL;
@@ -1624,6 +1886,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
&ena_rx_ctx);
if (unlikely(rc)) {
RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+ rx_ring->adapter->trigger_reset = true;
return 0;
}
@@ -1631,12 +1894,17 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
while (segments < ena_rx_ctx.descs) {
- mbuf = rx_buff_info[next_to_clean & ring_mask];
+ req_id = ena_rx_ctx.ena_bufs[segments].req_id;
+ rc = validate_rx_req_id(rx_ring, req_id);
+ if (unlikely(rc))
+ break;
+
+ mbuf = rx_buff_info[req_id];
mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->refcnt = 1;
mbuf->next = NULL;
- if (segments == 0) {
+ if (unlikely(segments == 0)) {
mbuf->nb_segs = ena_rx_ctx.descs;
mbuf->port = rx_ring->port_id;
mbuf->pkt_len = 0;
@@ -1648,6 +1916,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
mbuf_head->pkt_len += mbuf->data_len;
mbuf_prev = mbuf;
+ rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
+ req_id;
segments++;
next_to_clean++;
}
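
validate_rx_req_id() itself is outside this hunk; as an assumption about its
contract, a minimal form would be a bounds check that arms the reset trigger
on failure (a reconstruction, not the patch's actual body):

    /* Hypothetical sketch of the req_id sanity check used above. */
    static inline int validate_rx_req_id(struct ena_ring *rx_ring,
                                         uint16_t req_id)
    {
        if (likely(req_id < rx_ring->ring_size))
            return 0;

        RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);
        /* A bogus id from the device means state is corrupt; reset. */
        rx_ring->adapter->trigger_reset = true;
        return -EFAULT;
    }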
@@ -1741,6 +2011,46 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+static void ena_update_hints(struct ena_adapter *adapter,
+ struct ena_admin_ena_hw_hints *hints)
+{
+ if (hints->admin_completion_tx_timeout)
+ adapter->ena_dev.admin_queue.completion_timeout =
+ hints->admin_completion_tx_timeout * 1000;
+
+ if (hints->mmio_read_timeout)
+ /* convert to usec */
+ adapter->ena_dev.mmio_read.reg_read_to =
+ hints->mmio_read_timeout * 1000;
+
+ if (hints->driver_watchdog_timeout) {
+ if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ /* Convert msecs to ticks */
+ adapter->keep_alive_timeout =
+ (hints->driver_watchdog_timeout *
+ rte_get_timer_hz()) / 1000;
+ }
+}
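
The arithmetic above is just msec * hz / 1000; a worked sketch with a made-up
timer frequency (rte_get_timer_hz() returns the real rate at run time):

    #include <rte_cycles.h>

    /* E.g. a 100 ms hint on a 2.8 GHz timer yields 280,000,000 ticks. */
    static uint64_t hint_ms_to_ticks(uint64_t timeout_ms)
    {
        return (timeout_ms * rte_get_timer_hz()) / 1000;
    }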
+
+static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
+ struct rte_mbuf *mbuf)
+{
+ int num_segments, rc;
+
+ num_segments = mbuf->nb_segs;
+
+ if (likely(num_segments < tx_ring->sgl_size))
+ return 0;
+
+ rc = rte_pktmbuf_linearize(mbuf);
+ if (unlikely(rc))
+ RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+
+ return rc;
+}
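
For context, rte_pktmbuf_linearize() coalesces a chained mbuf into its first
segment, which is how the helper above gets nb_segs back under the device's
SGL limit. A standalone sketch (the helper name is invented; the negative
return on insufficient tailroom is rte_mbuf's documented behavior):

    #include <rte_mbuf.h>

    /* Flatten 'm' only when its chain would overflow the Tx SGL. */
    static int flatten_for_tx(struct rte_mbuf *m, uint16_t sgl_size)
    {
        if (m->nb_segs < sgl_size)
            return 0;               /* already fits, nothing to do */
        /* Copies every segment into the first one; returns a negative
         * value if the first segment lacks tailroom for the packet. */
        return rte_pktmbuf_linearize(m);
    }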
+
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1771,6 +2081,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
mbuf = tx_pkts[sent_idx];
+ rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+ if (unlikely(rc))
+ break;
+
req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
tx_info = &tx_ring->tx_buffer_info[req_id];
tx_info->mbuf = mbuf;
@@ -1848,6 +2162,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Clear complete packets */
while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
+ rc = validate_tx_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
/* Get Tx info & store how many descs were processed */
tx_info = &tx_ring->tx_buffer_info[req_id];
total_tx_descs += tx_info->tx_descs;
@@ -1875,6 +2193,9 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return sent_idx;
}
+/*********************************************************************
+ * PMD configuration
+ *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
@@ -1884,12 +2205,13 @@ static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}
static struct rte_pci_driver rte_ena_pmd = {
.id_table = pci_id_ena_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_WC_ACTIVATE,
.probe = eth_ena_pci_probe,
.remove = eth_ena_pci_remove,
};
@@ -1898,9 +2220,7 @@ RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(ena_init_log);
-static void
-ena_init_log(void)
+RTE_INIT(ena_init_log)
{
ena_logtype_init = rte_log_register("pmd.net.ena.init");
if (ena_logtype_init >= 0)
@@ -1909,3 +2229,75 @@ ena_init_log(void)
if (ena_logtype_driver >= 0)
rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
}
+
+/******************************************************************************
+ ******************************** AENQ Handlers *******************************
+ *****************************************************************************/
+static void ena_update_on_link_change(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct rte_eth_dev *eth_dev;
+ struct ena_adapter *adapter;
+ struct ena_admin_aenq_link_change_desc *aenq_link_desc;
+ uint32_t status;
+
+ adapter = (struct ena_adapter *)adapter_data;
+ aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
+ eth_dev = adapter->rte_dev;
+
+ status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
+ adapter->link_status = status;
+
+ ena_link_update(eth_dev, 0);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+static void ena_notification(void *data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_admin_ena_hw_hints *hints;
+
+ if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
+ RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n",
+ aenq_e->aenq_common_desc.group,
+ ENA_ADMIN_NOTIFICATION);
+
+ switch (aenq_e->aenq_common_desc.syndrom) {
+ case ENA_ADMIN_UPDATE_HINTS:
+ hints = (struct ena_admin_ena_hw_hints *)
+ (&aenq_e->inline_data_w4);
+ ena_update_hints(adapter, hints);
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n",
+ aenq_e->aenq_common_desc.syndrom);
+ }
+}
+
+static void ena_keep_alive(void *adapter_data,
+ __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ adapter->timestamp_wd = rte_get_timer_cycles();
+}
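
The timestamp written here is only half of the watchdog; the consumer side is
expected to be a periodic rte_timer callback comparing it against
keep_alive_timeout. A sketch of that check (function name and exact wiring
are assumptions, not code from this patch):

    /* Sketch: run periodically from the timer_wd registered at init. */
    static void check_keep_alive(struct ena_adapter *adapter)
    {
        if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
            return; /* device asked for no watchdog */

        if (unlikely(rte_get_timer_cycles() - adapter->timestamp_wd >
                     adapter->keep_alive_timeout)) {
            RTE_LOG(ERR, PMD, "Keep alive timeout\n");
            adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
            adapter->trigger_reset = true;
        }
    }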
+
+/**
+ * This handler will be called for unknown event groups or unimplemented handlers
+ **/
+static void unimplemented_aenq_handler(__rte_unused void *data,
+ __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+ RTE_LOG(ERR, PMD, "Unknown event was received or event with "
+ "unimplemented handler\n");
+}
+
+static struct ena_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
+ [ENA_ADMIN_NOTIFICATION] = ena_notification,
+ [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
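
How this table is consumed lives in ena_com, not in this patch; the dispatch
convention it relies on is roughly "index handlers[] by event group, fall back
to the unimplemented hook". A sketch under that assumption (ena_aenq_handler
and ENA_MAX_HANDLERS come from ena_com.h):

    static ena_aenq_handler get_aenq_handler(struct ena_aenq_handlers *h,
                                             u16 group)
    {
        if (group < ENA_MAX_HANDLERS && h->handlers[group])
            return h->handlers[group];
        return h->unimplemented_handler; /* unknown event group */
    }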
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 394d05e0..2dc8129e 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -34,8 +34,10 @@
#ifndef _ENA_ETHDEV_H_
#define _ENA_ETHDEV_H_
+#include <rte_cycles.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
+#include <rte_timer.h>
#include "ena_com.h"
@@ -48,8 +50,13 @@
#define ENA_NAME_MAX_LEN 20
#define ENA_PKT_MAX_BUFS 17
+#define ENA_MIN_MTU 128
+
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+#define ENA_WD_TIMEOUT_SEC 3
+#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
+
struct ena_adapter;
enum ena_ring_type {
@@ -70,8 +77,12 @@ struct ena_ring {
enum ena_ring_type type;
enum ena_admin_placement_policy_type tx_mem_queue_type;
- /* Holds the empty requests for TX OOO completions */
- uint16_t *empty_tx_reqs;
+ /* Holds the empty requests for TX/RX OOO completions */
+ union {
+ uint16_t *empty_tx_reqs;
+ uint16_t *empty_rx_reqs;
+ };
+
union {
struct ena_tx_buffer *tx_buffer_info; /* context of tx packet */
struct rte_mbuf **rx_buffer_info; /* context of rx packet */
@@ -92,14 +103,16 @@ struct ena_ring {
int configured;
struct ena_adapter *adapter;
uint64_t offloads;
+ u16 sgl_size;
} __rte_cache_aligned;
enum ena_adapter_state {
ENA_ADAPTER_STATE_FREE = 0,
ENA_ADAPTER_STATE_INIT = 1,
- ENA_ADAPTER_STATE_RUNNING = 2,
+ ENA_ADAPTER_STATE_RUNNING = 2,
ENA_ADAPTER_STATE_STOPPED = 3,
ENA_ADAPTER_STATE_CONFIG = 4,
+ ENA_ADAPTER_STATE_CLOSED = 5,
};
struct ena_driver_stats {
@@ -157,6 +170,7 @@ struct ena_adapter {
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
int tx_ring_size;
+ u16 max_tx_sgl_size;
/* RX */
struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
@@ -180,6 +194,18 @@ struct ena_adapter {
uint64_t tx_selected_offloads;
uint64_t rx_supported_offloads;
uint64_t rx_selected_offloads;
+
+ bool link_status;
+
+ enum ena_regs_reset_reason_types reset_reason;
+
+ struct rte_timer timer_wd;
+ uint64_t timestamp_wd;
+ uint64_t keep_alive_timeout;
+
+ bool trigger_reset;
+
+ bool wd_state;
};
#endif /* _ENA_ETHDEV_H_ */
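
The tx/rx union added above is safe because a ring is exclusively one type.
For the Rx side, the expected lifecycle of the ids is seed-once, then recycle
on completion; a sketch of the seeding (the helper is illustrative, the
recycle step is the one visible in eth_ena_recv_pkts() earlier in this patch):

    /* Seed the free-id pool so slot i initially hands out req_id i. */
    static void seed_rx_reqs(struct ena_ring *ring)
    {
        int i;

        for (i = 0; i < ring->ring_size; i++)
            ring->empty_rx_reqs[i] = i;
    }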
diff --git a/drivers/net/ena/meson.build b/drivers/net/ena/meson.build
new file mode 100644
index 00000000..091ca6e3
--- /dev/null
+++ b/drivers/net/ena/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('ena_ethdev.c',
+ 'base/ena_com.c',
+ 'base/ena_eth_com.c')
+
+deps += ['timer']
+
+includes += include_directories('base', 'base/ena_defs')