Diffstat (limited to 'drivers/net/ena/base/ena_defs')
-rw-r--r--   drivers/net/ena/base/ena_defs/ena_admin_defs.h    1979
-rw-r--r--   drivers/net/ena/base/ena_defs/ena_common_defs.h     54
-rw-r--r--   drivers/net/ena/base/ena_defs/ena_eth_io_defs.h   1488
-rw-r--r--   drivers/net/ena/base/ena_defs/ena_gen_info.h        35
-rw-r--r--   drivers/net/ena/base/ena_defs/ena_includes.h        39
-rw-r--r--   drivers/net/ena/base/ena_defs/ena_regs_defs.h      135
6 files changed, 3730 insertions, 0 deletions
diff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
new file mode 100644
index 00000000..fe412469
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
@@ -0,0 +1,1979 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+/* admin commands opcodes */
+enum ena_admin_aq_opcode {
+ /* create submission queue */
+ ENA_ADMIN_CREATE_SQ = 1,
+
+ /* destroy submission queue */
+ ENA_ADMIN_DESTROY_SQ = 2,
+
+ /* create completion queue */
+ ENA_ADMIN_CREATE_CQ = 3,
+
+ /* destroy completion queue */
+ ENA_ADMIN_DESTROY_CQ = 4,
+
+ /* get capabilities of particular feature */
+ ENA_ADMIN_GET_FEATURE = 8,
+
+ /* set capabilities of particular feature */
+ ENA_ADMIN_SET_FEATURE = 9,
+
+ /* get statistics */
+ ENA_ADMIN_GET_STATS = 11,
+};
+
+/* privileged admin commands opcodes */
+enum ena_admin_aq_opcode_privileged {
+ /* get device capabilities */
+ ENA_ADMIN_IDENTIFY = 48,
+
+ /* configure device */
+ ENA_ADMIN_CONFIGURE_PF_DEVICE = 49,
+
+ /* setup SRIOV PCIe Virtual Function capabilities */
+ ENA_ADMIN_SETUP_VF = 50,
+
+ /* load firmware to the controller */
+ ENA_ADMIN_LOAD_FIRMWARE = 52,
+
+ /* commit previously loaded firmware */
+ ENA_ADMIN_COMMIT_FIRMWARE = 53,
+
+ /* quiesce virtual function */
+ ENA_ADMIN_QUIESCE_VF = 54,
+
+ /* load virtual function from migrated context */
+ ENA_ADMIN_MIGRATE_VF = 55,
+};
+
+/* admin command completion status codes */
+enum ena_admin_aq_completion_status {
+ /* Request completed successfully */
+ ENA_ADMIN_SUCCESS = 0,
+
+ /* no resources to satisfy request */
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+
+ /* Bad opcode in request descriptor */
+ ENA_ADMIN_BAD_OPCODE = 2,
+
+ /* Unsupported opcode in request descriptor */
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+
+ /* Wrong request format */
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
+
+ /* One of the parameters is not valid. Provided in ACQ entry
+ * extended_status
+ */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+
+ /* unexpected error */
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+};
+
+/* get/set feature subcommands opcodes */
+enum ena_admin_aq_feature_id {
+ /* list of all supported attributes/capabilities in the ENA */
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ /* max number of supported queues for every queue type */
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+
+ /* low latency queues capabilities (max entry size, depth) */
+ ENA_ADMIN_LLQ_CONFIG = 3,
+
+ /* power management capabilities */
+ ENA_ADMIN_POWER_MANAGEMENT_CONFIG = 4,
+
+ /* MAC address filters support, multicast, broadcast, and
+ * promiscuous
+ */
+ ENA_ADMIN_MAC_FILTERS_CONFIG = 5,
+
+ /* VLAN membership, frame format, etc. */
+ ENA_ADMIN_VLAN_CONFIG = 6,
+
+ /* Available size for various on-chip memory resources, accessible
+ * by the driver
+ */
+ ENA_ADMIN_ON_DEVICE_MEMORY_CONFIG = 7,
+
+ /* Receive Side Scaling (RSS) function */
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+
+ /* stateless TCP/UDP/IP offload capabilities. */
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+
+ /* Multiple tuples flow table configuration */
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+
+ /* max MTU, current MTU */
+ ENA_ADMIN_MTU = 14,
+
+ /* Receive Side Scaling (RSS) hash input */
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+
+ /* overlay tunnels configuration */
+ ENA_ADMIN_TUNNEL_CONFIG = 19,
+
+ /* interrupt moderation parameters */
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+
+ /* 1588v2 and Timing configuration */
+ ENA_ADMIN_1588_CONFIG = 21,
+
+ /* Packet Header format templates configuration for input and
+ * output parsers
+ */
+ ENA_ADMIN_PKT_HEADER_TEMPLATES_CONFIG = 23,
+
+ /* AENQ configuration */
+ ENA_ADMIN_AENQ_CONFIG = 26,
+
+ /* Link configuration */
+ ENA_ADMIN_LINK_CONFIG = 27,
+
+ /* Host attributes configuration */
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+
+ /* Number of valid opcodes */
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+};
+
+/* descriptors and headers placement */
+enum ena_admin_placement_policy_type {
+ /* descriptors and headers are in OS memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+ /* descriptors and headers in device memory (a.k.a Low Latency
+ * Queue)
+ */
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+};
+
+/* link speeds */
+enum ena_admin_link_types {
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
+};
+
+/* completion queue update policy */
+enum ena_admin_completion_policy_type {
+ /* cqe for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+
+ /* cqe upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+
+ /* current queue head pointer is updated in OS memory upon sq
+ * descriptor request
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+
+ /* current queue head pointer is updated in OS memory for each sq
+ * descriptor
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+};
+
+/* type of get statistics command */
+enum ena_admin_get_stats_type {
+ /* Basic statistics */
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+
+ /* Extended statistics */
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+};
+
+/* scope of get statistics command */
+enum ena_admin_get_stats_scope {
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+
+ ENA_ADMIN_ETH_TRAFFIC = 1,
+};
+
+/* ENA Admin Queue (AQ) common descriptor */
+struct ena_admin_aq_common_desc {
+ /* word 0 : */
+ /* command identifier to associate it with the completion
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command_id;
+
+ /* as appears in ena_aq_opcode */
+ uint8_t opcode;
+
+ /* 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+};
+
+/* used in ena_aq_entry. Can point directly to control data, or to a page
+ * list chunk. Used also at the end of indirect mode page list chunks, for
+ * chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+ /* word 0 : indicates length of the buffer pointed by
+ * control_buffer_address.
+ */
+ uint32_t length;
+
+ /* words 1:2 : points to control buffer (direct or indirect) */
+ struct ena_common_mem_addr address;
+};
+
+/* submission queue full identification */
+struct ena_admin_sq {
+ /* word 0 : */
+ /* queue id */
+ uint16_t sq_idx;
+
+ /* 4:0 : reserved
+ * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved1;
+};
+
+/* AQ entry format */
+struct ena_admin_aq_entry {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1:3 : */
+ union {
+ /* command specific inline data */
+ uint32_t inline_data_w1[3];
+
+ /* words 1:3 : points to control buffer (direct or
+ * indirect, chained if needed)
+ */
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* command specific inline data */
+ uint32_t inline_data_w4[12];
+};
+
+/* ENA Admin Completion Queue (ACQ) common descriptor */
+struct ena_admin_acq_common_desc {
+ /* word 0 : */
+ /* command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ uint16_t command;
+
+ /* status of request execution */
+ uint8_t status;
+
+ /* 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ /* word 1 : */
+ /* provides additional info */
+ uint16_t extended_status;
+
+ /* submission queue head index, serves as a hint what AQ entries can
+ * be revoked
+ */
+ uint16_t sq_head_indx;
+};
+
+/* ACQ entry format */
+struct ena_admin_acq_entry {
+ /* words 0:1 : */
+ struct ena_admin_acq_common_desc acq_common_descriptor;
+
+ /* response type specific data */
+ uint32_t response_specific_data[14];
+};
+
+/* ENA AQ Create Submission Queue command. Placed in control buffer pointed
+ * by AQ entry
+ */
+struct ena_admin_aq_create_sq_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* word 1 : */
+ /* 4:0 : reserved0_w1
+ * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+ */
+ uint8_t sq_identity;
+
+ uint8_t reserved8_w1;
+
+ /* 3:0 : placement_policy - Describing where the SQ
+ * descriptor ring and the SQ packet headers reside:
+ * 0x1 - descriptors and headers are in OS memory,
+ * 0x3 - descriptors and headers in device memory
+ * (a.k.a Low Latency Queue)
+ * 6:4 : completion_policy - Describing what policy
+ * to use for generating a completion entry (cqe) in
+ * the CQ associated with this SQ: 0x0 - cqe for each
+ * sq descriptor, 0x1 - cqe upon request in sq
+ * descriptor, 0x2 - current queue head pointer is
+ * updated in OS memory upon sq descriptor request
+ * 0x3 - current queue head pointer is updated in OS
+ * memory for each sq descriptor
+ * 7 : reserved15_w1
+ */
+ uint8_t sq_caps_2;
+
+ /* 0 : is_physically_contiguous - Describes whether the
+ * queue ring memory is allocated in physically
+ * contiguous pages or split.
+ * 7:1 : reserved17_w1
+ */
+ uint8_t sq_caps_3;
+
+ /* word 2 : */
+ /* associated completion queue id. This CQ must be created prior to
+ * SQ creation
+ */
+ uint16_t cq_idx;
+
+ /* submission queue depth in entries */
+ uint16_t sq_depth;
+
+ /* words 3:4 : SQ physical base address in OS memory. This field
+ * should not be used for Low Latency queues. Has to be page
+ * aligned.
+ */
+ struct ena_common_mem_addr sq_ba;
+
+ /* words 5:6 : specifies queue head writeback location in OS
+ * memory. Valid if completion_policy is set to
+ * completion_policy_head_on_demand or completion_policy_head. Has
+ * to be cache aligned
+ */
+ struct ena_common_mem_addr sq_head_writeback;
+
+ /* word 7 : reserved word */
+ uint32_t reserved0_w7;
+
+ /* word 8 : reserved word */
+ uint32_t reserved0_w8;
+};
+
+/* submission queue direction */
+enum ena_admin_sq_direction {
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
+};
+
+/* ENA Response for Create SQ Command. Appears in ACQ entry as
+ * response_specific_data
+ */
+struct ena_admin_acq_create_sq_resp_desc {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* word 2 : */
+ /* sq identifier */
+ uint16_t sq_idx;
+
+ uint16_t reserved;
+
+ /* word 3 : queue doorbell address as an offset to PCIe MMIO REG
+ * BAR
+ */
+ uint32_t sq_doorbell_offset;
+
+ /* word 4 : low latency queue ring base address as an offset to
+ * PCIe MMIO LLQ_MEM BAR
+ */
+ uint32_t llq_descriptors_offset;
+
+ /* word 5 : low latency queue headers' memory as an offset to PCIe
+ * MMIO LLQ_MEM BAR
+ */
+ uint32_t llq_headers_offset;
+};
+
+/* ENA AQ Destroy Submission Queue command. Placed in control buffer
+ * pointed by AQ entry
+ */
+struct ena_admin_aq_destroy_sq_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1 : */
+ struct ena_admin_sq sq;
+};
+
+/* ENA Response for Destroy SQ Command. Appears in ACQ entry as
+ * response_specific_data
+ */
+struct ena_admin_acq_destroy_sq_resp_desc {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Create Completion Queue command */
+struct ena_admin_aq_create_cq_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* word 1 : */
+ /* 4:0 : reserved5
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode, otherwise - polling
+ * 7:6 : reserved6
+ */
+ uint8_t cq_caps_1;
+
+ /* 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 7:5 : reserved7
+ */
+ uint8_t cq_caps_2;
+
+ /* completion queue depth in # of entries. must be power of 2 */
+ uint16_t cq_depth;
+
+ /* word 2 : msix vector assigned to this cq */
+ uint32_t msix_vector;
+
+ /* words 3:4 : cq physical base address in OS memory. CQ must be
+ * physically contiguous
+ */
+ struct ena_common_mem_addr cq_ba;
+};
+
+/* ENA Response for Create CQ Command. Appears in ACQ entry as response
+ * specific data
+ */
+struct ena_admin_acq_create_cq_resp_desc {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* word 2 : */
+ /* cq identifier */
+ uint16_t cq_idx;
+
+ /* actual cq depth in # of entries */
+ uint16_t cq_actual_depth;
+
+ /* word 3 : doorbell address as an offset to PCIe MMIO REG BAR */
+ uint32_t cq_doorbell_offset;
+
+ /* word 4 : completion head doorbell address as an offset to PCIe
+ * MMIO REG BAR
+ */
+ uint32_t cq_head_db_offset;
+
+ /* word 5 : interrupt unmask register address as an offset into
+ * PCIe MMIO REG BAR
+ */
+ uint32_t cq_interrupt_unmask_register;
+};
+
+/* ENA AQ Destroy Completion Queue command. Placed in control buffer
+ * pointed by AQ entry
+ */
+struct ena_admin_aq_destroy_cq_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* word 1 : */
+ /* associated queue id. */
+ uint16_t cq_idx;
+
+ uint16_t reserved1;
+};
+
+/* ENA Response for Destroy CQ Command. Appears in ACQ entry as
+ * response_specific_data
+ */
+struct ena_admin_acq_destroy_cq_resp_desc {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct ena_admin_aq_get_stats_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1:3 : */
+ union {
+ /* command specific inline data */
+ uint32_t inline_data_w1[3];
+
+ /* words 1:3 : points to control buffer (direct or
+ * indirect, chained if needed)
+ */
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* word 4 : */
+ /* stats type as defined in enum ena_admin_get_stats_type */
+ uint8_t type;
+
+ /* stats scope defined in enum ena_admin_get_stats_scope */
+ uint8_t scope;
+
+ uint16_t reserved3;
+
+ /* word 5 : */
+ /* queue id. used when scope is specific_queue */
+ uint16_t queue_idx;
+
+ /* device id, value 0xFFFF means the calling device. Only a privileged
+ * device can get stats of another device
+ */
+ uint16_t device_id;
+};
+
+/* Basic Statistics Command. */
+struct ena_admin_basic_stats {
+ /* word 0 : */
+ uint32_t tx_bytes_low;
+
+ /* word 1 : */
+ uint32_t tx_bytes_high;
+
+ /* word 2 : */
+ uint32_t tx_pkts_low;
+
+ /* word 3 : */
+ uint32_t tx_pkts_high;
+
+ /* word 4 : */
+ uint32_t rx_bytes_low;
+
+ /* word 5 : */
+ uint32_t rx_bytes_high;
+
+ /* word 6 : */
+ uint32_t rx_pkts_low;
+
+ /* word 7 : */
+ uint32_t rx_pkts_high;
+
+ /* word 8 : */
+ uint32_t rx_drops_low;
+
+ /* word 9 : */
+ uint32_t rx_drops_high;
+};
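/* Illustrative sketch, not from the patch itself: every counter above is
 * split into 32-bit low/high words, so a reader typically recombines them
 * into 64-bit values. The helper name is hypothetical.
 */
static inline uint64_t ena_stat_to_u64(uint32_t low, uint32_t high)
{
	return ((uint64_t)high << 32) | low;
}
/* e.g. rx_bytes = ena_stat_to_u64(stats->rx_bytes_low, stats->rx_bytes_high); */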
+
+/* ENA Response for Get Statistics Command. Appears in ACQ entry as
+ * response_specific_data
+ */
+struct ena_admin_acq_get_stats_resp {
+ /* words 0:1 : Common Admin Queue completion descriptor */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* words 2:11 : */
+ struct ena_admin_basic_stats basic_stats;
+};
+
+/* ENA Get/Set Feature common descriptor. Appears as inline word in
+ * ena_aq_entry
+ */
+struct ena_admin_get_set_feature_common_desc {
+ /* word 0 : */
+ /* 1:0 : select - 0x1 - current value; 0x3 - default
+ * value
+ * 7:3 : reserved3
+ */
+ uint8_t flags;
+
+ /* as appears in ena_feature_id */
+ uint8_t feature_id;
+
+ /* reserved16 */
+ uint16_t reserved16;
+};
+
+/* ENA Device Attributes Feature descriptor. */
+struct ena_admin_device_attr_feature_desc {
+ /* word 0 : implementation id */
+ uint32_t impl_id;
+
+ /* word 1 : device version */
+ uint32_t device_version;
+
+ /* word 2 : bitmap of supported features; a value of 1 indicates
+ * that the feature is supported and SET/GET can be performed
+ * on it
+ */
+ uint32_t supported_features;
+
+ /* word 3 : */
+ uint32_t reserved3;
+
+ /* word 4 : Indicates how many bits are used for physical address
+ * access.
+ */
+ uint32_t phys_addr_width;
+
+ /* word 5 : Indicates how many bits are used for virtual address access. */
+ uint32_t virt_addr_width;
+
+ /* unicast MAC address (in Network byte order) */
+ uint8_t mac_addr[6];
+
+ uint8_t reserved7[2];
+
+ /* word 8 : Max supported MTU value */
+ uint32_t max_mtu;
+};
+
+/* ENA Max Queues Feature descriptor. */
+struct ena_admin_queue_feature_desc {
+ /* word 0 : Max number of submission queues (including LLQs) */
+ uint32_t max_sq_num;
+
+ /* word 1 : Max submission queue depth */
+ uint32_t max_sq_depth;
+
+ /* word 2 : Max number of completion queues */
+ uint32_t max_cq_num;
+
+ /* word 3 : Max completion queue depth */
+ uint32_t max_cq_depth;
+
+ /* word 4 : Max number of LLQ submission queues */
+ uint32_t max_llq_num;
+
+ /* word 5 : Max submission queue depth of LLQ */
+ uint32_t max_llq_depth;
+
+ /* word 6 : Max header size */
+ uint32_t max_header_size;
+
+ /* word 7 : */
+ /* Maximum number of descriptors, including meta descriptors, allowed
+ * for a single Tx packet
+ */
+ uint16_t max_packet_tx_descs;
+
+ /* Maximum number of descriptors allowed for a single Rx packet */
+ uint16_t max_packet_rx_descs;
+};
+
+/* ENA MTU Set Feature descriptor. */
+struct ena_admin_set_feature_mtu_desc {
+ /* word 0 : mtu size including L2 */
+ uint32_t mtu;
+};
+
+/* ENA host attributes Set Feature descriptor. */
+struct ena_admin_set_feature_host_attr_desc {
+ /* words 0:1 : host OS info base address in OS memory. host info is
+ * 4KB of physically contiguous memory
+ */
+ struct ena_common_mem_addr os_info_ba;
+
+ /* words 2:3 : host debug area base address in OS memory. debug
+ * area must be physically contiguous
+ */
+ struct ena_common_mem_addr debug_ba;
+
+ /* word 4 : debug area size */
+ uint32_t debug_area_size;
+};
+
+/* ENA Interrupt Moderation Get Feature descriptor. */
+struct ena_admin_feature_intr_moder_desc {
+ /* word 0 : */
+ /* interrupt delay granularity in usec */
+ uint16_t intr_delay_resolution;
+
+ uint16_t reserved;
+};
+
+/* ENA Link Get Feature descriptor. */
+struct ena_admin_get_feature_link_desc {
+ /* word 0 : Link speed in Mbps */
+ uint32_t speed;
+
+ /* word 1 : supported speeds (bit field of enum ena_admin_link
+ * types)
+ */
+ uint32_t supported;
+
+ /* word 2 : */
+ /* 0 : autoneg - auto negotiation
+ * 1 : duplex - Full Duplex
+ * 31:2 : reserved2
+ */
+ uint32_t flags;
+};
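/* Illustrative sketch, not from the patch itself: "supported" is a bitmask
 * of enum ena_admin_link_types and "flags" carries the autoneg/duplex bits,
 * so a driver might query the descriptor roughly as follows (the helper name
 * is hypothetical; the DUPLEX mask is defined further below in this header).
 */
static inline int ena_link_supports_25g_full_duplex(
		const struct ena_admin_get_feature_link_desc *link)
{
	return (link->supported & ENA_ADMIN_LINK_SPEED_25G) &&
	       (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK);
}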
+
+/* ENA AENQ Feature descriptor. */
+struct ena_admin_feature_aenq_desc {
+ /* word 0 : bitmask for AENQ groups the device can report */
+ uint32_t supported_groups;
+
+ /* word 1 : bitmask for AENQ groups to report */
+ uint32_t enabled_groups;
+};
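/* Illustrative sketch, not from the patch itself: supported_groups and
 * enabled_groups appear to be bitmasks indexed by enum ena_admin_aenq_group,
 * so enabling link-change and keep-alive notifications would look roughly
 * like this (assuming a BIT() macro is provided by the platform layer).
 */
static inline void ena_aenq_enable_basic_groups(
		struct ena_admin_feature_aenq_desc *aenq)
{
	aenq->enabled_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
			       BIT(ENA_ADMIN_KEEP_ALIVE);
}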
+
+/* ENA Stateless Offload Feature descriptor. */
+struct ena_admin_feature_offload_desc {
+ /* word 0 : */
+ /* Transmit side stateless offload
+ * 0 : TX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : TX_L4_ipv4_csum_part - TCP/UDP over IPv4
+ * checksum, the checksum field should be initialized
+ * with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full - TCP/UDP over IPv4
+ * checksum
+ * 3 : TX_L4_ipv6_csum_part - TCP/UDP over IPv6
+ * checksum, the checksum field should be initialized
+ * with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full - TCP/UDP over IPv6
+ * checksum
+ * 5 : tso_ipv4 - TCP/IPv4 Segmentation Offloading
+ * 6 : tso_ipv6 - TCP/IPv6 Segmentation Offloading
+ * 7 : tso_ecn - TCP Segmentation with ECN
+ */
+ uint32_t tx;
+
+ /* word 1 : */
+ /* Receive side supported stateless offload
+ * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+ * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+ * 3 : RX_hash - Hash calculation
+ */
+ uint32_t rx_supported;
+
+ /* word 2 : */
+ /* Receive side enabled stateless offload */
+ uint32_t rx_enabled;
+};
+
+/* hash functions */
+enum ena_admin_hash_functions {
+ /* Toeplitz hash */
+ ENA_ADMIN_TOEPLITZ = 1,
+
+ /* CRC32 hash */
+ ENA_ADMIN_CRC32 = 2,
+};
+
+/* ENA RSS flow hash control buffer structure */
+struct ena_admin_feature_rss_flow_hash_control {
+ /* word 0 : number of valid keys */
+ uint32_t keys_num;
+
+ /* word 1 : */
+ uint32_t reserved;
+
+ /* Toeplitz keys */
+ uint32_t key[10];
+};
+
+/* ENA RSS Flow Hash Function */
+struct ena_admin_feature_rss_flow_hash_function {
+ /* word 0 : */
+ /* supported hash functions
+ * 7:0 : funcs - supported hash functions (bitmask
+ * according to ena_admin_hash_functions)
+ */
+ uint32_t supported_func;
+
+ /* word 1 : */
+ /* selected hash func
+ * 7:0 : selected_func - selected hash function
+ * (bitmask according to ena_admin_hash_functions)
+ */
+ uint32_t selected_func;
+
+ /* word 2 : initial value */
+ uint32_t init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+ /* tcp/ipv4 */
+ ENA_ADMIN_RSS_TCP4 = 0,
+
+ /* udp/ipv4 */
+ ENA_ADMIN_RSS_UDP4 = 1,
+
+ /* tcp/ipv6 */
+ ENA_ADMIN_RSS_TCP6 = 2,
+
+ /* udp/ipv6 */
+ ENA_ADMIN_RSS_UDP6 = 3,
+
+ /* ipv4 not tcp/udp */
+ ENA_ADMIN_RSS_IP4 = 4,
+
+ /* ipv6 not tcp/udp */
+ ENA_ADMIN_RSS_IP6 = 5,
+
+ /* fragmented ipv4 */
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+
+ /* not ipv4/6 */
+ ENA_ADMIN_RSS_NOT_IP = 7,
+
+ /* max number of protocols */
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+ /* Ethernet Dest Addr */
+ ENA_ADMIN_RSS_L2_DA = 0,
+
+ /* Ethernet Src Addr */
+ ENA_ADMIN_RSS_L2_SA = 1,
+
+ /* ipv4/6 Dest Addr */
+ ENA_ADMIN_RSS_L3_DA = 2,
+
+ /* ipv4/6 Src Addr */
+ ENA_ADMIN_RSS_L3_SA = 5,
+
+ /* tcp/udp Dest Port */
+ ENA_ADMIN_RSS_L4_DP = 6,
+
+ /* tcp/udp Src Port */
+ ENA_ADMIN_RSS_L4_SP = 7,
+};
+
+/* hash input fields for flow protocol */
+struct ena_admin_proto_input {
+ /* word 0 : */
+ /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
+ uint16_t fields;
+
+ /* 0 : inner - for tunneled packet, select the fields
+ * from inner header
+ */
+ uint16_t flags;
+};
+
+/* ENA RSS hash control buffer structure */
+struct ena_admin_feature_rss_hash_control {
+ /* supported input fields */
+ struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ /* selected input fields */
+ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ /* supported input fields for inner header */
+ struct ena_admin_proto_input supported_inner_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ /* selected input fields */
+ struct ena_admin_proto_input selected_inner_fields[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+/* ENA RSS flow hash input */
+struct ena_admin_feature_rss_flow_hash_input {
+ /* word 0 : */
+ /* supported hash input sorting
+ * 1 : L3_sort - support swap L3 addresses if DA
+ * smaller than SA
+ * 2 : L4_sort - support swap L4 ports if DP smaller than
+ * SP
+ */
+ uint16_t supported_input_sort;
+
+ /* enabled hash input sorting
+ * 1 : enable_L3_sort - enable swap L3 addresses if
+ * DA smaller than SA
+ * 2 : enable_L4_sort - enable swap L4 ports if DP
+ * smaller than SP
+ */
+ uint16_t enabled_input_sort;
+};
+
+/* Operating system type */
+enum ena_admin_os_type {
+ /* Linux OS */
+ ENA_ADMIN_OS_LINUX = 1,
+
+ /* Windows OS */
+ ENA_ADMIN_OS_WIN = 2,
+
+ /* DPDK OS */
+ ENA_ADMIN_OS_DPDK = 3,
+
+ /* FreeBSD OS */
+ ENA_ADMIN_OS_FREE_BSD = 4,
+
+ /* PXE OS */
+ ENA_ADMIN_OS_PXE = 5,
+};
+
+/* host info */
+struct ena_admin_host_info {
+ /* word 0 : OS type defined in enum ena_os_type */
+ uint32_t os_type;
+
+ /* os distribution string format */
+ uint8_t os_dist_str[128];
+
+ /* word 33 : OS distribution numeric format */
+ uint32_t os_dist;
+
+ /* kernel version string format */
+ uint8_t kernel_ver_str[32];
+
+ /* word 42 : Kernel version numeric format */
+ uint32_t kernel_ver;
+
+ /* word 43 : */
+ /* driver version
+ * 7:0 : major - major
+ * 15:8 : minor - minor
+ * 23:16 : sub_minor - sub minor
+ */
+ uint32_t driver_version;
+
+ /* features bitmap */
+ uint32_t supported_network_features[4];
+};
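/* Illustrative usage note, not from the patch itself: driver_version packs
 * major/minor/sub_minor into a single word. The bitfield accessors defined
 * further below can fill it, e.g. for driver version 1.2.3:
 *
 *   memset(host_info, 0, sizeof(*host_info));   (the setters OR into the field)
 *   set_ena_admin_host_info_major(host_info, 1);
 *   set_ena_admin_host_info_minor(host_info, 2);
 *   set_ena_admin_host_info_sub_minor(host_info, 3);
 */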
+
+/* ENA RSS indirection table entry */
+struct ena_admin_rss_ind_table_entry {
+ /* word 0 : */
+ /* cq identifier */
+ uint16_t cq_idx;
+
+ uint16_t reserved;
+};
+
+/* ENA RSS indirection table */
+struct ena_admin_feature_rss_ind_table {
+ /* word 0 : */
+ /* min supported table size (2^min_size) */
+ uint16_t min_size;
+
+ /* max supported table size (2^max_size) */
+ uint16_t max_size;
+
+ /* word 1 : */
+ /* table size (2^size) */
+ uint16_t size;
+
+ uint16_t reserved;
+
+ /* word 2 : index of the inline entry. 0xFFFFFFFF means invalid */
+ uint32_t inline_index;
+
+ /* words 3 : used for updating single entry, ignored when setting
+ * the entire table through the control buffer.
+ */
+ struct ena_admin_rss_ind_table_entry inline_entry;
+};
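/* Illustrative note, not from the patch itself: the size fields above are
 * log2 values, e.g. size = 7 describes a 2^7 = 128-entry indirection table,
 * bounded by min_size and max_size.
 */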
+
+/* ENA Get Feature command */
+struct ena_admin_get_feat_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1:3 : points to control buffer (direct or indirect,
+ * chained if needed)
+ */
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ /* words 4 : */
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ /* words 5:15 : */
+ union {
+ /* raw words */
+ uint32_t raw[11];
+ } u;
+};
+
+/* ENA Get Feature command response */
+struct ena_admin_get_feat_resp {
+ /* words 0:1 : */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* words 2:15 : */
+ union {
+ /* raw words */
+ uint32_t raw[14];
+
+ /* words 2:10 : Get Device Attributes */
+ struct ena_admin_device_attr_feature_desc dev_attr;
+
+ /* words 2:5 : Max queues num */
+ struct ena_admin_queue_feature_desc max_queue;
+
+ /* words 2:3 : AENQ configuration */
+ struct ena_admin_feature_aenq_desc aenq;
+
+ /* words 2:4 : Get Link configuration */
+ struct ena_admin_get_feature_link_desc link;
+
+ /* words 2:4 : offload configuration */
+ struct ena_admin_feature_offload_desc offload;
+
+ /* words 2:4 : rss flow hash function */
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ /* words 2 : rss flow hash input */
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ /* words 2:3 : rss indirection table */
+ struct ena_admin_feature_rss_ind_table ind_table;
+
+ /* words 2 : interrupt moderation configuration */
+ struct ena_admin_feature_intr_moder_desc intr_moderation;
+ } u;
+};
+
+/* ENA Set Feature command */
+struct ena_admin_set_feat_cmd {
+ /* words 0 : */
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* words 1:3 : points to control buffer (direct or indirect,
+ * chained if needed)
+ */
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ /* words 4 : */
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ /* words 5:15 : */
+ union {
+ /* raw words */
+ uint32_t raw[11];
+
+ /* words 5 : mtu size */
+ struct ena_admin_set_feature_mtu_desc mtu;
+
+ /* words 5:7 : host attributes */
+ struct ena_admin_set_feature_host_attr_desc host_attr;
+
+ /* words 5:6 : AENQ configuration */
+ struct ena_admin_feature_aenq_desc aenq;
+
+ /* words 5:7 : rss flow hash function */
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ /* words 5 : rss flow hash input */
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ /* words 5:6 : rss indirection table */
+ struct ena_admin_feature_rss_ind_table ind_table;
+ } u;
+};
+
+/* ENA Set Feature command response */
+struct ena_admin_set_feat_resp {
+ /* words 0:1 : */
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ /* words 2:15 : */
+ union {
+ /* raw words */
+ uint32_t raw[14];
+ } u;
+};
+
+/* ENA Asynchronous Event Notification Queue descriptor. */
+struct ena_admin_aenq_common_desc {
+ /* word 0 : */
+ uint16_t group;
+
+ uint16_t syndrom;
+
+ /* word 1 : */
+ /* 0 : phase */
+ uint8_t flags;
+
+ uint8_t reserved1[3];
+
+ /* word 2 : Timestamp LSB */
+ uint32_t timestamp_low;
+
+ /* word 3 : Timestamp MSB */
+ uint32_t timestamp_high;
+};
+
+/* asynchronous event notification groups */
+enum ena_admin_aenq_group {
+ /* Link State Change */
+ ENA_ADMIN_LINK_CHANGE = 0,
+
+ ENA_ADMIN_FATAL_ERROR = 1,
+
+ ENA_ADMIN_WARNING = 2,
+
+ ENA_ADMIN_NOTIFICATION = 3,
+
+ ENA_ADMIN_KEEP_ALIVE = 4,
+
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+};
+
+/* syndrome of AENQ notification group */
+enum ena_admin_aenq_notification_syndrom {
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
+};
+
+/* ENA Asynchronous Event Notification generic descriptor. */
+struct ena_admin_aenq_entry {
+ /* words 0:3 : */
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* command specific inline data */
+ uint32_t inline_data_w4[12];
+};
+
+/* ENA Asynchronous Event Notification Queue Link Change descriptor. */
+struct ena_admin_aenq_link_change_desc {
+ /* words 0:3 : */
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* word 4 : */
+ /* 0 : link_status */
+ uint32_t flags;
+};
+
+/* ENA MMIO Readless response interface */
+struct ena_admin_ena_mmio_req_read_less_resp {
+ /* word 0 : */
+ /* request id */
+ uint16_t req_id;
+
+ /* register offset */
+ uint16_t reg_off;
+
+ /* word 1 : value is valid when poll is cleared */
+ uint32_t reg_val;
+};
+
+/* aq_common_desc */
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* sq */
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+
+/* acq_common_desc */
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aq_create_sq_cmd */
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
+
+/* aq_create_cq_cmd */
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+/* get_feature_link_desc */
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+
+/* feature_offload_desc */
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+
+/* feature_rss_flow_hash_function */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \
+ GENMASK(7, 0)
+
+/* proto_input */
+#define ENA_ADMIN_PROTO_INPUT_INNER_MASK BIT(0)
+
+/* feature_rss_flow_hash_input */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
+
+/* host_info */
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+
+/* aenq_common_desc */
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_link_change_desc */
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
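/* Illustrative sketch, not from the patch itself: BIT() and GENMASK() are
 * assumed to be supplied by the platform/compatibility layer. Definitions
 * consistent with how the masks above are used would be:
 *
 *   #define BIT(nr)       (1UL << (nr))
 *   #define GENMASK(h, l) \
 *           (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 *
 * so that, for example, GENMASK(7, 5) yields 0xE0 and BIT(0) yields 0x1.
 */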
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint16_t
+get_ena_admin_aq_common_desc_command_id(
+ const struct ena_admin_aq_common_desc *p)
+{
+ return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void
+set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p,
+ uint16_t val)
+{
+ p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void
+set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_common_desc_ctrl_data(
+ const struct ena_admin_aq_common_desc *p)
+{
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >>
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT)
+ & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_common_desc_ctrl_data_indirect(
+ const struct ena_admin_aq_common_desc *p)
+{
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK)
+ >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_common_desc_ctrl_data_indirect(
+ struct ena_admin_aq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT)
+ & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+}
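/* Illustrative usage note, not from the patch itself: the set_* helpers
 * above OR values into the target field, so the descriptor must start out
 * zeroed. Marking a command that uses an indirect control buffer could look
 * roughly like:
 *
 *   memset(&desc, 0, sizeof(desc));
 *   set_ena_admin_aq_common_desc_command_id(&desc, cmd_id);
 *   set_ena_admin_aq_common_desc_phase(&desc, 1);
 *   set_ena_admin_aq_common_desc_ctrl_data(&desc, 1);
 *   set_ena_admin_aq_common_desc_ctrl_data_indirect(&desc, 1);
 */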
+
+static inline uint8_t
+get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+{
+ return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK)
+ >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+}
+
+static inline void
+set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+{
+ p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_acq_common_desc_command_id(
+ const struct ena_admin_acq_common_desc *p)
+{
+ return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline void
+set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p,
+ uint16_t val)
+{
+ p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void
+set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_sq_cmd_sq_direction(
+ const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK)
+ >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_create_sq_cmd_sq_direction(
+ struct ena_admin_aq_create_sq_cmd *p,
+ uint8_t val)
+{
+ p->sq_identity |= (val <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT)
+ & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_sq_cmd_placement_policy(
+ const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline void
+set_ena_admin_aq_create_sq_cmd_placement_policy(
+ struct ena_admin_aq_create_sq_cmd *p,
+ uint8_t val)
+{
+ p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_sq_cmd_completion_policy(
+ const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return (p->sq_caps_2
+ & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK)
+ >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_create_sq_cmd_completion_policy(
+ struct ena_admin_aq_create_sq_cmd *p,
+ uint8_t val)
+{
+ p->sq_caps_2 |=
+ (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT)
+ & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
+ const struct ena_admin_aq_create_sq_cmd *p)
+{
+ return p->sq_caps_3 &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline void
+set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
+ struct ena_admin_aq_create_sq_cmd *p,
+ uint8_t val)
+{
+ p->sq_caps_3 |= val &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
+ const struct ena_admin_aq_create_cq_cmd *p)
+{
+ return (p->cq_caps_1 &
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK)
+ >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+}
+
+static inline void
+set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
+ struct ena_admin_aq_create_cq_cmd *p,
+ uint8_t val)
+{
+ p->cq_caps_1 |=
+ (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT)
+ & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
+ const struct ena_admin_aq_create_cq_cmd *p)
+{
+ return p->cq_caps_2
+ & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline void
+set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
+ struct ena_admin_aq_create_cq_cmd *p,
+ uint8_t val)
+{
+ p->cq_caps_2 |=
+ val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_get_set_feature_common_desc_select(
+ const struct ena_admin_get_set_feature_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline void
+set_ena_admin_get_set_feature_common_desc_select(
+ struct ena_admin_get_set_feature_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_get_feature_link_desc_autoneg(
+ const struct ena_admin_get_feature_link_desc *p)
+{
+ return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline void
+set_ena_admin_get_feature_link_desc_autoneg(
+ struct ena_admin_get_feature_link_desc *p,
+ uint32_t val)
+{
+ p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_get_feature_link_desc_duplex(
+ const struct ena_admin_get_feature_link_desc *p)
+{
+ return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK)
+ >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+}
+
+static inline void
+set_ena_admin_get_feature_link_desc_duplex(
+ struct ena_admin_get_feature_link_desc *p,
+ uint32_t val)
+{
+ p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT)
+ & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val <<
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val <<
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val <<
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val <<
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_tso_ipv4(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_tso_ipv4(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_tso_ipv6(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_tso_ipv6(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_tso_ecn(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_tso_ecn(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return p->rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->rx_supported |=
+ val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->rx_supported |=
+ (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->rx_supported |=
+ (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_offload_desc_RX_hash(
+ const struct ena_admin_feature_offload_desc *p)
+{
+ return (p->rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
+ >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_offload_desc_RX_hash(
+ struct ena_admin_feature_offload_desc *p,
+ uint32_t val)
+{
+ p->rx_supported |=
+ (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT)
+ & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_rss_flow_hash_function_funcs(
+ const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+ return p->supported_func &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_function_funcs(
+ struct ena_admin_feature_rss_flow_hash_function *p,
+ uint32_t val)
+{
+ p->supported_func |=
+ val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_feature_rss_flow_hash_function_selected_func(
+ const struct ena_admin_feature_rss_flow_hash_function *p)
+{
+ return p->selected_func &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_function_selected_func(
+ struct ena_admin_feature_rss_flow_hash_function *p,
+ uint32_t val)
+{
+ p->selected_func |=
+ val &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_proto_input_inner(const struct ena_admin_proto_input *p)
+{
+ return p->flags & ENA_ADMIN_PROTO_INPUT_INNER_MASK;
+}
+
+static inline void
+set_ena_admin_proto_input_inner(struct ena_admin_proto_input *p, uint16_t val)
+{
+ p->flags |= val & ENA_ADMIN_PROTO_INPUT_INNER_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_feature_rss_flow_hash_input_L3_sort(
+ const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->supported_input_sort &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK)
+ >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_input_L3_sort(
+ struct ena_admin_feature_rss_flow_hash_input *p,
+ uint16_t val)
+{
+ p->supported_input_sort |=
+ (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT)
+ & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_feature_rss_flow_hash_input_L4_sort(
+ const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->supported_input_sort &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK)
+ >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_input_L4_sort(
+ struct ena_admin_feature_rss_flow_hash_input *p,
+ uint16_t val)
+{
+ p->supported_input_sort |=
+ (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT)
+ & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
+ const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->enabled_input_sort &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK)
+ >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
+ struct ena_admin_feature_rss_flow_hash_input *p,
+ uint16_t val)
+{
+ p->enabled_input_sort |=
+ (val <<
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT)
+ & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+}
+
+static inline uint16_t
+get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
+ const struct ena_admin_feature_rss_flow_hash_input *p)
+{
+ return (p->enabled_input_sort &
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK)
+ >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+}
+
+static inline void
+set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
+ struct ena_admin_feature_rss_flow_hash_input *p,
+ uint16_t val)
+{
+ p->enabled_input_sort |=
+ (val <<
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT)
+ & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+{
+ return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline void
+set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+{
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK)
+ >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+}
+
+static inline void
+set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT)
+ & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+{
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK)
+ >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+}
+
+static inline void
+set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT)
+ & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+}
+
+static inline uint8_t
+get_ena_admin_aenq_common_desc_phase(
+ const struct ena_admin_aenq_common_desc *p)
+{
+ return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline void
+set_ena_admin_aenq_common_desc_phase(
+ struct ena_admin_aenq_common_desc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
+}
+
+static inline uint32_t
+get_ena_admin_aenq_link_change_desc_link_status(
+ const struct ena_admin_aenq_link_change_desc *p)
+{
+ return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+static inline void
+set_ena_admin_aenq_link_change_desc_link_status(
+ struct ena_admin_aenq_link_change_desc *p,
+ uint32_t val)
+{
+ p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_ADMIN_H_ */
diff --git a/drivers/net/ena/base/ena_defs/ena_common_defs.h b/drivers/net/ena/base/ena_defs/ena_common_defs.h
new file mode 100644
index 00000000..95e0f389
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_common_defs.h
@@ -0,0 +1,54 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_COMMON_H_
+#define _ENA_COMMON_H_
+
+/* spec version */
+#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* spec version major */
+#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* spec version minor */
+
+/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
+struct ena_common_mem_addr {
+ /* word 0 : low 32 bit of the memory address */
+ uint32_t mem_addr_low;
+
+ /* word 1 : */
+ /* high 16 bits of the memory address */
+ uint16_t mem_addr_high;
+
+ /* MBZ */
+ uint16_t reserved16;
+};
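/* Illustrative sketch, not from the patch itself: a 64-bit DMA address is
 * split into the low/high words of ena_common_mem_addr roughly as below.
 * The helper name is hypothetical; real code should also verify that the
 * address fits within 48 bits.
 */
static inline void ena_mem_addr_set(struct ena_common_mem_addr *ena_addr,
				    uint64_t dma_addr)
{
	ena_addr->mem_addr_low = (uint32_t)dma_addr;
	ena_addr->mem_addr_high = (uint16_t)(dma_addr >> 32);
	ena_addr->reserved16 = 0;
}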
+
+#endif /*_ENA_COMMON_H_ */
diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
new file mode 100644
index 00000000..a547033d
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
@@ -0,0 +1,1488 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+/* Layer 3 protocol index */
+enum ena_eth_io_l3_proto_index {
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
+};
+
+/* Layer 4 protocol index */
+enum ena_eth_io_l4_proto_index {
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+};
+
+/* ENA IO Queue Tx descriptor */
+struct ena_eth_io_tx_desc {
+ /* word 0 : */
+ /* length, request id and control flags
+ * 15:0 : length - Buffer length in bytes, must
+ * include any packet trailers that the ENA is
+ * supposed to update, such as the End-to-End CRC and
+ * Authentication GMAC. This length must not include
+ * the 'Push_Buffer' length, nor the 4 bytes added at
+ * the end for the 802.3 Ethernet FCS
+ * 21:16 : req_id_hi - Request ID[15:10]
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBZ
+ * 24 : phase
+ * 25 : reserved1 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ uint32_t len_ctrl;
+
+ /* word 1 : */
+ /* ethernet control
+ * 3:0 : l3_proto_idx - L3 protocol, if
+ * tunnel_ctrl[0] is set, then this is the inner
+ * packet L3. This field is required when
+ * l3_csum_en, l3_csum or tso_en are set.
+ * 4 : DF - IPv4 DF, must be 0 if the packet is IPv4
+ * and the DF flag of the IPv4 header is 0. Otherwise
+ * it must be set to 1
+ * 6:5 : reserved5
+ * 7 : tso_en - Enable TSO, for TCP only. For tunneled
+ * packets (tunnel_ctrl[0]=1), the inner packet will
+ * be segmented while the outer tunnel is duplicated
+ * 12:8 : l4_proto_idx - L4 protocol, if
+ * tunnel_ctrl[0] is set, then this is the inner
+ * packet L4. This field needs to be set when
+ * l4_csum_en or tso_en are set.
+ * 13 : l3_csum_en - enable IPv4 header checksum. if
+ * tunnel_ctrl[0] is set, then this will enable
+ * checksum for the inner packet IPv4
+ * 14 : l4_csum_en - enable TCP/UDP checksum. if
+ * tunnel_ctrl[0] is set, then this will enable the
+ * TCP/UDP checksum for the inner packet
+ * 15 : ethernet_fcs_dis - when set, the controller
+ * will not append the 802.3 Ethernet Frame Check
+ * Sequence to the packet
+ * 16 : reserved16
+ * 17 : l4_csum_partial - L4 partial checksum. When
+ * set to 0, the ENA calculates the L4 checksum,
+ * where the Destination Address required for the
+ * TCP/UDP pseudo-header is taken from the actual
+ * packet L3 header. When set to 1, the ENA doesn't
+ * calculate the pseudo-header sum; the checksum
+ * field of the L4 header is used instead. When TSO
+ * is enabled, the checksum of the pseudo-header
+ * must not include the TCP length field. L4 partial
+ * checksum should be used for IPv6 packets that
+ * contain Routing Headers.
+ * 20:18 : tunnel_ctrl - Bit 0: tunneling exists, Bit
+ * 1: tunnel packet actually uses UDP as L4, Bit 2:
+ * tunnel packet L3 protocol: 0: IPv4 1: IPv6
+ * 21 : ts_req - Indicates that the packet is IEEE
+ * 1588v2 packet requiring the timestamp
+ * 31:22 : req_id_lo - Request ID[9:0]
+ */
+ uint32_t meta_ctrl;
+
+ /* word 2 : Buffer address bits[31:0] */
+ uint32_t buff_addr_lo;
+
+ /* word 3 : */
+ /* address high and header size
+ * 15:0 : addr_hi - Buffer Pointer[47:32]
+ * 23:16 : reserved16_w2
+ * 31:24 : header_length - Header length. For Low
+ * Latency Queues, this field indicates the number
+ * of bytes written to the headers' memory. For
+ * normal queues, if packet is TCP or UDP, and longer
+ * than max_header_size, then this field should be
+ * set to the sum of L4 header offset and L4 header
+ * size (without options); otherwise, this field
+ * should be set to 0. For both modes, this field
+ * must not exceed the max_header_size.
+ * max_header_size value is reported by the Max
+ * Queues Feature descriptor
+ */
+ uint32_t buff_addr_hi_hdr_sz;
+};
+
+/* ENA IO Queue Tx Meta descriptor */
+struct ena_eth_io_tx_meta_desc {
+ /* word 0 : */
+ /* length, request id and control flags
+ * 9:0 : req_id_lo - Request ID[9:0]
+ * 11:10 : outr_l3_off_hi - valid if
+ * tunnel_ctrl[0]=1. bits[4:3] of outer packet L3
+ * offset
+ * 12 : reserved12 - MBZ
+ * 13 : reserved13 - MBZ
+ * 14 : ext_valid - if set, the offset fields in Word
+ * 2 are valid, as are MSS High and Outer L3 Offset
+ * High in Word 0 and bits [31:24] in Word 3
+ * 15 : word3_valid - If set Crypto Info[23:0] of
+ * Word 3 is valid
+ * 19:16 : mss_hi_ptp
+ * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
+ * Extended Metadata Descriptor
+ * 21 : meta_store - Store extended metadata in queue
+ * cache
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBO
+ * 24 : phase
+ * 25 : reserved25 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ uint32_t len_ctrl;
+
+ /* word 1 : */
+ /* word 1
+ * 5:0 : req_id_hi
+ * 31:6 : reserved6 - MBZ
+ */
+ uint32_t word1;
+
+ /* word 2 : */
+ /* word 2
+ * 7:0 : l3_hdr_len - the length of the L3 IP header.
+ * if tunnel_ctrl[0]=1, this is the IP header length
+ * of the inner packet. FIXME - check if includes IP
+ * options hdr_len
+ * 15:8 : l3_hdr_off - the offset of the first byte
+ * in the L3 header from the beginning of the to-be
+ * transmitted packet. if tunnel_ctrl[0]=1, this is
+ * the offset of the L3 header of the inner packet
+ * 21:16 : l4_hdr_len_in_words - counts the L4 header
+ * length in words. There is an explicit assumption
+ * that the L4 header appears right after the L3
+ * header and the L4 offset is based on
+ * l3_hdr_off+l3_hdr_len. FIXME - please confirm
+ * 31:22 : mss_lo
+ */
+ uint32_t word2;
+
+ /* word 3 : */
+ /* word 3
+ * 23:0 : crypto_info
+ * 28:24 : outr_l3_hdr_len_words - valid if
+ * tunnel_ctrl[0]=1. Counts in words
+ * 31:29 : outr_l3_off_lo - valid if
+ * tunnel_ctrl[0]=1. bits[2:0] of outer packet L3
+ * offset. Counts the offset of the tunnel IP header
+ * from beginning of the packet. NOTE: if the tunnel
+ * header requires CRC or checksum, it is expected to
+ * be done by the driver as it is not done by the HW
+ */
+ uint32_t word3;
+};
+
+/* ENA IO Queue Tx completions descriptor */
+struct ena_eth_io_tx_cdesc {
+ /* word 0 : */
+ /* Request ID[15:0] */
+ uint16_t req_id;
+
+ uint8_t status;
+
+ /* flags
+ * 0 : phase
+ * 7:1 : reserved1
+ */
+ uint8_t flags;
+
+ /* word 1 : */
+ uint16_t sub_qid;
+
+ /* indicates location of submission queue head */
+ uint16_t sq_head_idx;
+};
+
+/* ENA IO Queue Rx descriptor */
+struct ena_eth_io_rx_desc {
+ /* word 0 : */
+ /* In bytes. 0 means 64KB */
+ uint16_t length;
+
+ /* MBZ */
+ uint8_t reserved2;
+
+ /* control flags
+ * 0 : phase
+ * 1 : reserved1 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction
+ * 3 : last - Indicates last descriptor in transaction
+ * 4 : comp_req
+ * 5 : reserved5 - MBO
+ * 7:6 : reserved6 - MBZ
+ */
+ uint8_t ctrl;
+
+ /* word 1 : */
+ uint16_t req_id;
+
+ /* MBZ */
+ uint16_t reserved6;
+
+ /* word 2 : Buffer address bits[31:0] */
+ uint32_t buff_addr_lo;
+
+ /* word 3 : */
+ /* Buffer Address bits[47:32] */
+ uint16_t buff_addr_hi;
+
+ /* MBZ */
+ uint16_t reserved16_w3;
+};
+
+/* ENA IO Queue Rx Completion Base Descriptor (4-word format). Note: all
+ * Ethernet parsing information is valid only when last=1
+ */
+struct ena_eth_io_rx_cdesc_base {
+ /* word 0 : */
+ /* 4:0 : l3_proto_idx - L3 protocol index
+ * 6:5 : src_vlan_cnt - Source VLAN count
+ * 7 : tunnel - Tunnel exists
+ * 12:8 : l4_proto_idx - L4 protocol index
+ * 13 : l3_csum_err - when set, either an L3
+ * checksum error was detected or the controller
+ * didn't validate the checksum. If a tunnel exists,
+ * this result is for the inner packet. This bit is
+ * valid only when l3_proto_idx indicates an IPv4
+ * packet
+ * 14 : l4_csum_err - when set, either an L4
+ * checksum error was detected or the controller
+ * didn't validate the checksum. If a tunnel exists,
+ * this result is for the inner packet. This bit is
+ * valid only when l4_proto_idx indicates a TCP/UDP
+ * packet and ipv4_frag is not set
+ * 15 : ipv4_frag - Indicates IPv4 fragmented packet
+ * 17:16 : reserved16
+ * 19:18 : reserved18
+ * 20 : secured_pkt - Set if packet was handled by
+ * inline crypto engine
+ * 22:21 : crypto_status - bit 0 secured direction:
+ * 0: decryption, 1: encryption. bit 1 reserved
+ * 23 : reserved23
+ * 24 : phase
+ * 25 : l3_csum2 - second checksum engine result
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : inr_l4_csum - TCP/UDP checksum results for
+ * inner packet
+ * 29 : reserved29
+ * 30 : buffer - 0: Metadata descriptor. 1: Buffer
+ * Descriptor was used
+ * 31 : reserved31
+ */
+ uint32_t status;
+
+ /* word 1 : */
+ uint16_t length;
+
+ uint16_t req_id;
+
+ /* word 2 : 32-bit hash result */
+ uint32_t hash;
+
+ /* word 3 : */
+ /* submission queue number */
+ uint16_t sub_qid;
+
+ uint16_t reserved;
+};
+
+/* ENA IO Queue Rx Completion Descriptor (8-word format) */
+struct ena_eth_io_rx_cdesc_ext {
+ /* words 0:3 : Rx Completion Extended */
+ struct ena_eth_io_rx_cdesc_base base;
+
+ /* word 4 : Completed Buffer address bits[31:0] */
+ uint32_t buff_addr_lo;
+
+ /* word 5 : */
+ /* the buffer address used bits[47:32] */
+ uint16_t buff_addr_hi;
+
+ uint16_t reserved16;
+
+ /* word 6 : Reserved */
+ uint32_t reserved_w6;
+
+ /* word 7 : Reserved */
+ uint32_t reserved_w7;
+};
+
+/* ENA Interrupt Unmask Register */
+struct ena_eth_io_intr_reg {
+ /* word 0 : */
+ /* 14:0 : rx_intr_delay - rx interrupt delay value
+ * 29:15 : tx_intr_delay - tx interrupt delay value
+ * 30 : intr_unmask - if set, unmasks interrupt
+ * 31 : reserved
+ */
+ uint32_t intr_control;
+};
+
+/* tx_desc */
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT 18
+#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK GENMASK(20, 18)
+#define ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT 21
+#define ENA_ETH_IO_TX_DESC_TS_REQ_MASK BIT(21)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+
+/* tx_meta_desc */
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT 10
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK GENMASK(11, 10)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15
+#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK BIT(15)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK GENMASK(23, 0)
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK GENMASK(28, 24)
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT 29
+#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK GENMASK(31, 29)
+
+/* tx_cdesc */
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+
+/* rx_desc */
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+
+/* rx_cdesc_base */
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT 7
+#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK BIT(7)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT 20
+#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK BIT(20)
+#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT 21
+#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK GENMASK(22, 21)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT 28
+#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK BIT(28)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+
+/* intr_reg */
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+
+#if !defined(ENA_DEFS_LINUX_MAINLINE)
+static inline uint32_t get_ena_eth_io_tx_desc_length(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_length(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK)
+ >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_hi(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT)
+ & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK)
+ >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_meta_desc(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT)
+ & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_phase(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK)
+ >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_phase(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT)
+ & ENA_ETH_IO_TX_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_first(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK)
+ >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_first(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT)
+ & ENA_ETH_IO_TX_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_last(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK)
+ >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_last(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT)
+ & ENA_ETH_IO_TX_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_comp_req(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK)
+ >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_comp_req(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT)
+ & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_proto_idx(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_DF(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK)
+ >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_DF(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_DF_SHIFT)
+ & ENA_ETH_IO_TX_DESC_DF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_tso_en(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK)
+ >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_tso_en(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT)
+ & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK)
+ >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_proto_idx(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT)
+ & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK)
+ >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l3_csum_en(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT)
+ & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK)
+ >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_en(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT)
+ & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK)
+ >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT)
+ & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK)
+ >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_l4_csum_partial(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT)
+ & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_tunnel_ctrl(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK)
+ >> ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_tunnel_ctrl(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT)
+ & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_ts_req(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TS_REQ_MASK)
+ >> ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_ts_req(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT)
+ & ENA_ETH_IO_TX_DESC_TS_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK)
+ >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_req_id_lo(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->meta_ctrl |=
+ (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT)
+ & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_desc_addr_hi(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_desc_header_length(
+ const struct ena_eth_io_tx_desc *p)
+{
+ return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK)
+ >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_desc_header_length(
+ struct ena_eth_io_tx_desc *p,
+ uint32_t val)
+{
+ p->buff_addr_hi_hdr_sz |=
+ (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT)
+ & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_hi(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_hi(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_ext_valid(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_word3_valid(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_word3_valid(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_store(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_meta_desc(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_phase(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_first(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_first(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_last(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_last(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_comp_req(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->len_ctrl |=
+ (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word2 |=
+ (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word2 |=
+ (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_mss_lo(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word2 |=
+ (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_crypto_info(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return p->word3 & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_crypto_info(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word3 |= val & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word3 |=
+ (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_lo(
+ const struct ena_eth_io_tx_meta_desc *p)
+{
+ return (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK)
+ >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT;
+}
+
+static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_lo(
+ struct ena_eth_io_tx_meta_desc *p,
+ uint32_t val)
+{
+ p->word3 |=
+ (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT)
+ & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_tx_cdesc_phase(
+ const struct ena_eth_io_tx_cdesc *p)
+{
+ return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_tx_cdesc_phase(
+ struct ena_eth_io_tx_cdesc *p,
+ uint8_t val)
+{
+ p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_phase(
+ const struct ena_eth_io_rx_desc *p)
+{
+ return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline void set_ena_eth_io_rx_desc_phase(
+ struct ena_eth_io_rx_desc *p,
+ uint8_t val)
+{
+ p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_first(
+ const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK)
+ >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_first(
+ struct ena_eth_io_rx_desc *p,
+ uint8_t val)
+{
+ p->ctrl |=
+ (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT)
+ & ENA_ETH_IO_RX_DESC_FIRST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_last(
+ const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK)
+ >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_last(
+ struct ena_eth_io_rx_desc *p,
+ uint8_t val)
+{
+ p->ctrl |=
+ (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT)
+ & ENA_ETH_IO_RX_DESC_LAST_MASK;
+}
+
+static inline uint8_t get_ena_eth_io_rx_desc_comp_req(
+ const struct ena_eth_io_rx_desc *p)
+{
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK)
+ >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_desc_comp_req(
+ struct ena_eth_io_rx_desc *p,
+ uint8_t val)
+{
+ p->ctrl |=
+ (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT)
+ & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_tunnel(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_tunnel(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_secured_pkt(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_secured_pkt(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_crypto_status(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_crypto_status(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_phase(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_first(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_last(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_inr_l4_csum(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_inr_l4_csum(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(
+ const struct ena_eth_io_rx_cdesc_base *p)
+{
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK)
+ >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
+}
+
+static inline void set_ena_eth_io_rx_cdesc_base_buffer(
+ struct ena_eth_io_rx_cdesc_base *p,
+ uint32_t val)
+{
+ p->status |=
+ (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT)
+ & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(
+ const struct ena_eth_io_intr_reg *p)
+{
+ return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline void set_ena_eth_io_intr_reg_rx_intr_delay(
+ struct ena_eth_io_intr_reg *p,
+ uint32_t val)
+{
+ p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(
+ const struct ena_eth_io_intr_reg *p)
+{
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK)
+ >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_tx_intr_delay(
+ struct ena_eth_io_intr_reg *p,
+ uint32_t val)
+{
+ p->intr_control |=
+ (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+}
+
+static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(
+ const struct ena_eth_io_intr_reg *p)
+{
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK)
+ >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
+}
+
+static inline void set_ena_eth_io_intr_reg_intr_unmask(
+ struct ena_eth_io_intr_reg *p,
+ uint32_t val)
+{
+ p->intr_control |=
+ (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT)
+ & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
+
+#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
+#endif /*_ENA_ETH_IO_H_ */
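
The set_* helpers above only OR new bits into the descriptor words, so a
descriptor must be zeroed before it is filled. A minimal sketch of programming
a single-descriptor Tx packet with these helpers follows; the wrapper name
example_fill_tx_desc and the particular flag choices are assumptions made for
this sketch, not the driver's actual Tx path.

#include <stdint.h>
#include <string.h>
#include "ena_eth_io_defs.h"

static void example_fill_tx_desc(struct ena_eth_io_tx_desc *desc,
				 uint64_t buf_addr, uint16_t len,
				 uint16_t req_id, uint8_t phase)
{
	memset(desc, 0, sizeof(*desc));

	/* len_ctrl: buffer length, request id[15:10] and control flags */
	set_ena_eth_io_tx_desc_length(desc, len);
	set_ena_eth_io_tx_desc_req_id_hi(desc, req_id >> 10);
	set_ena_eth_io_tx_desc_phase(desc, phase);
	set_ena_eth_io_tx_desc_first(desc, 1);
	set_ena_eth_io_tx_desc_last(desc, 1);
	set_ena_eth_io_tx_desc_comp_req(desc, 1);

	/* meta_ctrl: request id[9:0]; offload bits are left at zero */
	set_ena_eth_io_tx_desc_req_id_lo(desc, req_id & 0x3ff);

	/* 48-bit buffer address split across words 2 and 3 */
	desc->buff_addr_lo = (uint32_t)(buf_addr & 0xffffffff);
	set_ena_eth_io_tx_desc_addr_hi(desc, (uint32_t)(buf_addr >> 32));
}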
diff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h
new file mode 100644
index 00000000..4abdffed
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h
@@ -0,0 +1,35 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define ENA_GEN_DATE "Mon Feb 15 14:33:08 IST 2016"
+#define ENA_GEN_COMMIT "c71ec25"
diff --git a/drivers/net/ena/base/ena_defs/ena_includes.h b/drivers/net/ena/base/ena_defs/ena_includes.h
new file mode 100644
index 00000000..a86c876f
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_includes.h
@@ -0,0 +1,39 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "ena_common_defs.h"
+#include "ena_regs_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
+#include "ena_efa_admin_defs.h"
+#include "ena_efa_io_defs.h"
diff --git a/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
new file mode 100644
index 00000000..d0241278
--- /dev/null
+++ b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
@@ -0,0 +1,135 @@
+/*-
+* BSD LICENSE
+*
+* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of copyright holder nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _ENA_REGS_H_
+#define _ENA_REGS_H_
+
+/* ena_registers offsets */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* version register */
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+
+/* controller_version register */
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+
+/* caps register */
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+
+/* aq_caps register */
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* acq_caps register */
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* aenq_caps register */
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* dev_ctl register */
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+
+/* dev_sts register */
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+
+/* mmio_reg_read register */
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+
+/* rss_ind_entry_update register */
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+
+#endif /*_ENA_REGS_H_ */
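
The register offsets and mask/shift pairs above are meant to be combined in
the usual read-mask-shift pattern. A minimal sketch of decoding a raw value
read from the version register (ENA_REGS_VERSION_OFF) follows; how the 32-bit
value is read from the device BAR is outside the scope of these definitions,
and the function name is an assumption made for this sketch.

#include <stdint.h>
#include <stdio.h>
#include "ena_regs_defs.h"

static void example_print_device_version(uint32_t ver_reg)
{
	uint32_t major = (ver_reg & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT;
	uint32_t minor = ver_reg & ENA_REGS_VERSION_MINOR_VERSION_MASK;

	printf("ENA device version: %u.%u\n", major, minor);
}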