author     Ido Barnea <ibarnea@cisco.com>  2016-07-13 14:17:09 +0300
committer  Ido Barnea <ibarnea@cisco.com>  2016-07-24 14:00:31 +0300
commit     a551c94a6069f30617825f9046d36099846ab7ec (patch)
tree       005c42cdc50eda621a93dc7dc7b8611b8bb97380 /src/dpdk/drivers/net/i40e
parent     47906bbc49195f2ef2ed44cd54337feb87f7dbc3 (diff)
dpdk0716 rc3
Diffstat (limited to 'src/dpdk/drivers/net/i40e')
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_adminq.c      1139
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_adminq.h       169
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h  2543
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_alloc.h         65
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_common.c      6681
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_dcb.c         1305
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_dcb.h          223
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_devids.h        82
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_diag.c         175
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_diag.h          61
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_hmc.c          370
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_hmc.h          245
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c     1411
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h      200
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_nvm.c         1637
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_osdep.h        240
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_prototype.h    545
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_register.h    5370
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_status.h       107
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_type.h        1674
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h     438
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_ethdev.c           9498
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_ethdev.h            715
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c        2678
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_fdir.c             1451
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_logs.h               77
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_pf.c               1104
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_pf.h                128
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_regs.h              997
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx.c             3329
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx.h              258
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx_vec.c          761
32 files changed, 45676 insertions, 0 deletions
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_adminq.c b/src/dpdk/drivers/net/i40e/base/i40e_adminq.c
new file mode 100644
index 00000000..0d3a83fa
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_adminq.c
@@ -0,0 +1,1139 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (i40e_is_vf(hw)) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.asq.bal = I40E_VF_ATQBAL1;
+ hw->aq.asq.bah = I40E_VF_ATQBAH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ hw->aq.arq.bal = I40E_VF_ARQBAL1;
+ hw->aq.arq.bah = I40E_VF_ARQBAH1;
+#ifdef PF_DRIVER
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.asq.bal = I40E_PF_ATQBAL;
+ hw->aq.asq.bah = I40E_PF_ATQBAH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ hw->aq.arq.bal = I40E_PF_ARQBAL;
+ hw->aq.arq.bah = I40E_PF_ARQBAH;
+#endif
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with Admin queue design, there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
+ wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
+ wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ *  - hw->aq.num_asq_entries
+ *  - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this function, as the memory
+ * allocation routines it calls are not atomic-context safe
+ **/
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ *  - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this function, as the memory
+ * allocation routines it calls are not atomic-context safe
+ **/
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+ return ret_code;
+}
+#ifdef PF_DRIVER
+
+/**
+ * i40e_resume_aq - resume AQ processing from 0
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
+#endif /* PF_DRIVER */
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
+{
+#ifdef PF_DRIVER
+ u16 cfg_ptr, oem_hi, oem_lo;
+ u16 eetrack_lo, eetrack_hi;
+#endif
+ enum i40e_status_code ret_code;
+#ifdef PF_DRIVER
+ int retry = 0;
+#endif
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+ i40e_init_spinlock(&hw->aq.asq_spinlock);
+ i40e_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_asq;
+
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ /* VF has no need of firmware */
+ if (i40e_is_vf(hw))
+ goto init_adminq_exit;
+#endif
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.fw_build,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ i40e_msec_delay(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_arq;
+
+ /* get the NVM version info */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+ &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+ &oem_hi);
+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+ &oem_lo);
+ hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->nvm_release_on_done = false;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+#endif /* PF_DRIVER */
+ ret_code = I40E_SUCCESS;
+
+ /* success! */
+ goto init_adminq_exit;
+
+#ifdef PF_DRIVER
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+#endif
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
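+
+/* A minimal sizing sketch (illustrative, assuming a PF driver; the entry
+ * counts and 4096-byte buffer sizes are plausible example values, not
+ * mandated ones, and err_init_adminq is a hypothetical caller label):
+ *
+ *	hw->aq.num_asq_entries = 128;
+ *	hw->aq.num_arq_entries = 128;
+ *	hw->aq.asq_buf_size = 4096;
+ *	hw->aq.arq_buf_size = 4096;
+ *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
+ *		goto err_init_adminq;
+ */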
+
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - clean the Admin Send Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Returns the number of free descriptors
+ **/
+u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
+ i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
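+
+/* Async completion sketch (illustrative): a sender may attach a callback
+ * through the details structure; i40e_clean_asq() above then invokes it
+ * with a copy of the written-back descriptor:
+ *
+ *	struct i40e_asq_cmd_details details = {0};
+ *
+ *	details.async = true;
+ *	details.callback = (void *)my_done_fn;
+ *	i40e_asq_send_command(hw, &desc, NULL, 0, &details);
+ *
+ * where my_done_fn is a hypothetical function with the I40E_ADMINQ_CALLBACK
+ * signature.
+ */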
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+#ifdef VF_DRIVER
+bool i40e_asq_done(struct i40e_hw *hw)
+#else
+STATIC bool i40e_asq_done(struct i40e_hw *hw)
+#endif
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command routine for the Admin Send Queue: it
+ * reclaims completed descriptors, posts the new command, and, unless the
+ * caller requests async or postponed handling, waits for the firmware
+ * write-back.
+ **/
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ u32 val = 0;
+
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ i40e_memcpy(details,
+ cmd_details,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
+ }
+ } else {
+ i40e_memset(details, 0,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Async flag not set along with postpone flag\n");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ i40e_memcpy(dma_buff->va, buff, buff_size,
+ I40E_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40e_asq_done(hw))
+ break;
+ i40e_msec_delay(1);
+ total_delay++;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ if (buff != NULL)
+ i40e_memcpy(buff, dma_buff->va, buff_size,
+ I40E_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = I40E_SUCCESS;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ i40e_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+ return status;
+}
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
+}
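+
+/* Direct-command sketch (illustrative; assumes an initialized AQ, and uses
+ * the queue shutdown opcode and flag defined in i40e_adminq_cmd.h):
+ *
+ *	struct i40e_aq_desc desc;
+ *	enum i40e_status_code status;
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ *	desc.params.internal.param0 = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */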
+
+/**
+ * i40e_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+#else
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+#else
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ i40e_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, I40E_DMA_TO_NONDMA);
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+	/* Restore the original datalen and buffer address in the desc;
+	 * FW updates datalen to indicate the event message size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+#ifdef PF_DRIVER
+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
+#endif /* PF_DRIVER */
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
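+
+/* ARQ drain sketch (illustrative caller loop; the event buffer is assumed
+ * to be caller-allocated and at least hw->aq.arq_buf_size bytes long):
+ *
+ *	struct i40e_arq_event_info event;
+ *	u16 pending = 0;
+ *
+ *	event.buf_len = hw->aq.arq_buf_size;
+ *	event.msg_buf = msg_buf;
+ *	do {
+ *		if (i40e_clean_arq_element(hw, &event, &pending))
+ *			break;
+ *		handle_event(hw, &event);
+ *	} while (pending);
+ *
+ * where msg_buf and handle_event() are hypothetical caller-side names; a
+ * dispatcher would typically switch on LE16_TO_CPU(event.desc.opcode).
+ */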
+
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_adminq.h b/src/dpdk/drivers/net/i40e/base/i40e_adminq.h
new file mode 100644
index 00000000..750973c5
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_adminq.h
@@ -0,0 +1,169 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_status.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct i40e_aq_desc *wb_desc;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+	u32 asq_cmd_timeout;		/* send queue cmd write back timeout */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
+ struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * aq_ret: AdminQ handler error code can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
+ **/
+STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+{
+ int aq_to_posix[] = {
+ 0, /* I40E_AQ_RC_OK */
+ -EPERM, /* I40E_AQ_RC_EPERM */
+ -ENOENT, /* I40E_AQ_RC_ENOENT */
+ -ESRCH, /* I40E_AQ_RC_ESRCH */
+ -EINTR, /* I40E_AQ_RC_EINTR */
+ -EIO, /* I40E_AQ_RC_EIO */
+ -ENXIO, /* I40E_AQ_RC_ENXIO */
+ -E2BIG, /* I40E_AQ_RC_E2BIG */
+ -EAGAIN, /* I40E_AQ_RC_EAGAIN */
+ -ENOMEM, /* I40E_AQ_RC_ENOMEM */
+ -EACCES, /* I40E_AQ_RC_EACCES */
+ -EFAULT, /* I40E_AQ_RC_EFAULT */
+ -EBUSY, /* I40E_AQ_RC_EBUSY */
+ -EEXIST, /* I40E_AQ_RC_EEXIST */
+ -EINVAL, /* I40E_AQ_RC_EINVAL */
+ -ENOTTY, /* I40E_AQ_RC_ENOTTY */
+ -ENOSPC, /* I40E_AQ_RC_ENOSPC */
+ -ENOSYS, /* I40E_AQ_RC_ENOSYS */
+ -ERANGE, /* I40E_AQ_RC_ERANGE */
+ -EPIPE, /* I40E_AQ_RC_EFLUSHED */
+ -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
+ -EROFS, /* I40E_AQ_RC_EMODE */
+ -EFBIG, /* I40E_AQ_RC_EFBIG */
+ };
+
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+ return -ERANGE;
+
+ return aq_to_posix[aq_rc];
+}
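+
+/* Usage sketch (illustrative): after an AQ call returns, e.g.
+ *
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ *	err = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ *
+ * err then holds a conventional negative errno (or 0) for user-land callers.
+ */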
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#ifdef I40E_ESS_SUPPORT
+#define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */
+#endif
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h b/src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h
new file mode 100644
index 00000000..2b7a7608
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -0,0 +1,2543 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0005
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
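+
+/* Flag-composition sketch (illustrative): for an indirect command in which
+ * the driver supplies data, BUF and RD are set, plus LB for buffers larger
+ * than I40E_AQ_LARGE_BUF (512 bytes, see i40e_adminq.h):
+ *
+ *	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ *	if (buff_size > I40E_AQ_LARGE_BUF)
+ *		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ */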
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+#ifdef X722_SUPPORT
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+#endif
+ /* LAA */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+#ifdef X722_SUPPORT
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
+#endif
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+ i40e_aqc_opc_set_switch_config = 0x0205,
+ i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
+ i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_run_phy_activity = 0x0626,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_oem_post_update = 0x0720,
+ i40e_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
+ i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+#ifdef X722_SUPPORT
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
+#endif
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
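+
+/* For example, I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) below expands to
+ * an enum whose initializer divides by zero at compile time unless
+ * sizeof(struct i40e_aqc_get_version) == 16, so a mis-sized command
+ * structure fails the build rather than corrupting the descriptor.
+ */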
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_ISCSI 0x0022
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+ u8 reserved[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+#ifdef X722_SUPPORT
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+#endif /* X722_SUPPORT */
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer
+ * in the command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first entry in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
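+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * walking the indirect buffer returned by Get Switch Configuration
+ * (0x0200); only num_reported elements of the array are valid.
+ */
+#if 0 /* illustrative only */
+static void
+example_walk_switch_config(struct i40e_aqc_get_switch_config_resp *resp)
+{
+ u16 i, num = LE16_TO_CPU(resp->header.num_reported);
+
+ for (i = 0; i < num; i++) {
+  struct i40e_aqc_switch_config_element_resp *el = &resp->element[i];
+
+  if (el->element_type == I40E_AQ_SW_ELEM_TYPE_VSI)
+   (void)LE16_TO_CPU(el->seid); /* this VSI's SEID */
+ }
+}
+#endif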
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
+/* Set Switch Configuration (direct 0x0205) */
+struct i40e_aqc_set_switch_config {
+ __le16 flags;
+#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
+#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+ __le16 valid_flags;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
+
+/* Read Receive control registers (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ * used for accessing Rx control registers that can be
+ * slow and need special handling when under high Rx load
+ */
+struct i40e_aqc_rx_ctl_reg_read_write {
+ __le32 reserved1;
+ __le32 address;
+ __le32 reserved2;
+ __le32 value;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
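+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * reading one Rx control register through the admin queue rather than by
+ * direct MMIO; FW returns the value in the same descriptor. The two
+ * helpers are assumed to be the ones declared elsewhere in this driver.
+ */
+#if 0 /* illustrative only */
+static u32 example_rx_ctl_read(struct i40e_hw *hw, u32 reg_addr)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+  (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+ cmd->address = CPU_TO_LE32(reg_addr);
+ if (i40e_asq_send_command(hw, &desc, NULL, 0, NULL))
+  return 0; /* a real caller would propagate the error */
+ return LE32_TO_CPU(cmd->value);
+}
+#endif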
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#endif
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+#endif
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
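+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * marking only the VLAN section valid and programming a port VLAN in the
+ * properties buffer before an Update VSI (0x0211) command; the other
+ * sections are ignored by FW because their valid bits stay clear.
+ */
+#if 0 /* illustrative only */
+static void
+example_set_pvid(struct i40e_aqc_vsi_properties_data *ctxt, u16 pvid)
+{
+ ctxt->valid_sections = CPU_TO_LE16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt->pvid = CPU_TO_LE16(pvid);
+ /* accept all frames, insert the PVID on Tx, strip nothing on Rx */
+ ctxt->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+    I40E_AQ_VSI_PVLAN_INSERT_PVID |
+    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+}
+#endif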
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
+#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used as the command descriptor for most VLAN commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
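+/* Usage sketch (editor's illustration, not part of the original header):
+ * filling one entry of the indirect buffer for Add MAC-VLAN (0x0250);
+ * the descriptor itself is struct i40e_aqc_macvlan with num_addresses
+ * counting entries like this one.
+ */
+#if 0 /* illustrative only */
+static void
+example_fill_macvlan(struct i40e_aqc_add_macvlan_element_data *e,
+       const u8 mac[6])
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+  e->mac_addr[i] = mac[i];
+ /* perfect-match filter that ignores the VLAN tag entirely */
+ e->flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+          I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+ e->vlan_tag = 0;
+}
+#endif
+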
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
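+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * enabling unicast promiscuous mode only. The same bit must be set in
+ * both promiscuous_flags (the new value) and valid_flags (which bits to
+ * apply), which leaves the other modes untouched.
+ */
+#if 0 /* illustrative only */
+static void
+example_set_uc_promisc(struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
+         u16 seid)
+{
+ cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = CPU_TO_LE16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
+}
+#endif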
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_oveb_cloud,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
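+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * flags for one cloud-filter element matching inner MAC plus VNI over a
+ * VXLAN tunnel and steering hits to a queue; tenant_id carries the VNI.
+ */
+#if 0 /* illustrative only */
+static void
+example_fill_cloud_filter(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *f,
+ u32 vni, u16 queue)
+{
+ f->flags = CPU_TO_LE16(I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID |
+          I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+          (I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN <<
+           I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+ f->tenant_id = CPU_TO_LE32(vni);
+ f->queue_number = CPU_TO_LE16(queue & I40E_AQC_ADD_CLOUD_QUEUE_MASK);
+}
+#endif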
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4, 5) do not use an external buffer,
+ * so take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
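+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * a VLAN mirror rule whose external buffer holds num_entries 2-byte VLAN
+ * ids; rule types 4 and 5 (mirror-all) would pass no buffer instead.
+ */
+#if 0 /* illustrative only */
+static void
+example_fill_vlan_mirror(struct i40e_aqc_add_delete_mirror_rule *cmd,
+    u16 sw_seid, u16 dst_vsi_seid, u16 nb_vlans)
+{
+ cmd->seid = CPU_TO_LE16(sw_seid);
+ cmd->rule_type = CPU_TO_LE16(I40E_AQC_MIRROR_RULE_TYPE_VLAN <<
+         I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ cmd->num_entries = CPU_TO_LE16(nb_vlans);
+ cmd->destination = CPU_TO_LE16(dst_vsi_seid);
+ /* addr_high/addr_low are filled in by the AQ send helper */
+}
+#endif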
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
+/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 external_power_ability;
+#define I40E_AQ_LINK_POWER_CLASS_1 0x00
+#define I40E_AQ_LINK_POWER_CLASS_2 0x01
+#define I40E_AQ_LINK_POWER_CLASS_3 0x02
+#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
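+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * decoding the response fields most callers of Get Link Status care
+ * about; without media present the link cannot be up.
+ */
+#if 0 /* illustrative only */
+static bool example_link_is_up(const struct i40e_aqc_get_link_status *resp)
+{
+ if (!(resp->link_info & I40E_AQ_MEDIA_AVAILABLE))
+  return false;
+ return (resp->link_info & I40E_AQ_LINK_UP_FUNCTION) != 0;
+}
+#endif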
+
+/* Set event mask command (direct 0x0613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports needs both bits 4 and 5 */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+ __le16 activity_id;
+ u8 flags;
+ u8 reserved1;
+ __le32 control;
+ __le32 data;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
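+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * an indirect NVM read of `length` bytes at `offset`; LAST_CMD marks the
+ * final transaction of a sequence so FW can release internal state.
+ */
+#if 0 /* illustrative only */
+static void
+example_fill_nvm_read(struct i40e_aqc_nvm_update *cmd, u8 module,
+        u32 offset, u16 length, bool last)
+{
+ cmd->module_pointer = module;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+ if (last)
+  cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+}
+#endif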
+
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+ __le32 field_id;
+ __le32 field_value;
+ __le16 field_options;
+ __le16 reserved;
+};
+
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
+/* Thermal Sensor (indirect 0x0721)
+ * read or set thermal sensor configs and values
+ * takes a sensor- and command-specific data buffer, not detailed here
+ */
+struct i40e_aqc_thermal_sensor {
+ u8 sensor_action;
+#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
+#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
+#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
+
+/* Send to PF command (indirect 0x0801); the id field is used only by the PF
+ * Send to VF command (indirect 0x0802); the id field is used only by the PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns the structures below as the indirect response
+ */
+
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBX
+ */
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+struct i40e_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
+ u8 status;
+ u8 reserved[15];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting a specific LLDP agent, e.g. DCBX
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
+ u8 reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+#ifdef X722_SUPPORT
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
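+
+/* Usage sketch (editor's illustration, not part of the original header):
+ * addressing a per-VSI RSS LUT; the VSI_VALID bit tells FW that the
+ * vsi_id field is populated, and flags selects the VSI table over the
+ * PF-global one.
+ */
+#if 0 /* illustrative only */
+static void
+example_fill_rss_lut_cmd(struct i40e_aqc_get_set_rss_lut *cmd, u16 vsi_id)
+{
+ cmd->vsi_id = CPU_TO_LE16(I40E_AQC_SET_RSS_LUT_VSI_VALID |
+     (vsi_id & I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd->flags = CPU_TO_LE16(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);
+}
+#endif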
+#endif
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ __le16 param_value2;
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_alloc.h b/src/dpdk/drivers/net/i40e/base/i40e_alloc.h
new file mode 100644
index 00000000..38c2f655
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_alloc.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
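+/* Illustrative usage sketch (not part of this header): DMA memory obtained
+ * through these hooks must be released with the matching free routine, e.g.
+ *
+ *	struct i40e_dma_mem mem;
+ *
+ *	if (i40e_allocate_dma_mem(hw, &mem, i40e_mem_arq_buf, 4096, 4096))
+ *		return I40E_ERR_NO_MEMORY;
+ *	...
+ *	i40e_free_dma_mem(hw, &mem);
+ */
+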
+#endif /* _I40E_ALLOC_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_common.c b/src/dpdk/drivers/net/i40e/base/i40e_common.c
new file mode 100644
index 00000000..98ed4b68
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_common.c
@@ -0,0 +1,6681 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+#else
+STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+#endif
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+	DEBUGFUNC("i40e_set_mac_type");
+
+ if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+#ifdef X722_SUPPORT
+#ifdef X722_A0_SUPPORT
+ case I40E_DEV_ID_X722_A0:
+#endif
+ case I40E_DEV_ID_KX_X722:
+ case I40E_DEV_ID_QSFP_X722:
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_QSFP_I_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+#endif
+#ifdef X722_SUPPORT
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+#ifdef X722_A0_SUPPORT
+ case I40E_DEV_ID_X722_A0_VF:
+#endif
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
+#endif /* INTEGRATED_VF || VF_DRIVER */
+#endif /* X722_SUPPORT */
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+#endif
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+#ifndef I40E_NDIS_SUPPORT
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
+{
+ switch (stat_err) {
+ case I40E_SUCCESS:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+#endif /* I40E_NDIS_SUPPORT */
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u16 len = LE16_TO_CPU(aq_desc->datalen);
+ u8 *buf = (u8 *)buffer;
+ u16 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* the most we could have left is 16 bytes, pad with zeros */
+ if (i < len) {
+ char d_buf[16];
+ int j, i_sav;
+
+ i_sav = i;
+ memset(d_buf, 0, sizeof(d_buf));
+ for (j = 0; i < len; j++, i++)
+ d_buf[j] = buf[i];
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i_sav, d_buf[0], d_buf[1],
+ d_buf[2], d_buf[3],
+ d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+ d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+ d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
+ }
+ }
+}
+
+/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ if (hw->aq.asq.len)
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+#else
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
+#else
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
+ return false;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+#ifdef X722_SUPPORT
+
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+STATIC enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get or set the RSS key per VSI
+ **/
+STATIC enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
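+
+/* Illustrative usage sketch (not part of the original driver): after filling
+ * a key structure "key" and a LUT buffer "lut" for VSI "vsi", a caller
+ * would do
+ *
+ *	i40e_aq_set_rss_key(hw, vsi, &key);
+ *	i40e_aq_set_rss_lut(hw, vsi, false, lut, sizeof(lut));
+ *
+ * where pf_lut selects the PF-wide table (true) instead of the per-VSI
+ * table (false).
+ */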
+#endif /* X722_SUPPORT */
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
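+
+/* Illustrative decode sketch (not part of the original driver), following
+ * the work flow above; field names are taken from struct
+ * i40e_rx_ptype_decoded:
+ *
+ *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
+ *
+ *	if (!decoded.known)
+ *		the packet type is unknown
+ *	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
+ *		inspect decoded.tunnel_type, decoded.inner_prot, etc.
+ *	else
+ *		decode via enum i40e_rx_l2_ptype
+ */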
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
+/**
+ * i40e_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (I40E_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
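+
+/* Illustrative usage sketch (not part of the original driver):
+ *
+ *	u8 mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };
+ *
+ *	if (i40e_validate_mac_addr(mac) != I40E_SUCCESS)
+ *		reject the address
+ *
+ * A multicast address (odd first octet) or the all-zero address yields
+ * I40E_ERR_INVALID_MAC_ADDR.
+ */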
+#ifdef PF_DRIVER
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not modify the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 port, ari, func_rid;
+
+ DEBUGFUNC("i40e_init_shared_code");
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+#ifdef X722_SUPPORT
+ case I40E_MAC_X722:
+#endif
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw->phy.get_link_info = true;
+
+	/* Determine port number and PF number */
+ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+ >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ hw->port = (u8)port;
+ ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+ I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ func_rid = rd32(hw, I40E_PF_FUNC_RID);
+ if (ari)
+ hw->pf_id = (u8)(func_rid & 0xff);
+ else
+ hw->pf_id = (u8)(func_rid & 0x7);
+
+#ifdef X722_SUPPORT
+ if (hw->mac.type == I40E_MAC_X722)
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
+#endif
+ status = i40e_init_nvm(hw);
+ return status;
+}
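+
+/* Worked example (illustrative) of the PF id derivation above: with ARI
+ * enabled the full low byte of I40E_PF_FUNC_RID is used, so a func_rid of
+ * 0x2a gives pf_id 0x2a; without ARI only the 3-bit function number is
+ * kept, giving pf_id 0x2.
+ */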
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+STATIC enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = LE16_TO_CPU(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = CPU_TO_LE16(flags);
+ cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
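+
+/* Worked example (illustrative) of the packing above: for MAC address
+ * 00:11:22:33:44:55, mac_sah carries the two high bytes and mac_sal the
+ * remaining four, i.e. mac_sah = 0x0011 and mac_sal = 0x22334455, each
+ * converted to little-endian before being sent to firmware.
+ */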
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+ return status;
+}
+
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_PORT_ADDR_VALID)
+ memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
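+
+/* Worked example (illustrative): each I40E_GLLAN_TXPRE_QDIS register block
+ * covers 128 queues, so absolute queue 300 is programmed through block
+ * 300 / 128 = 2 with an in-block index of 300 % 128 = 44.
+ */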
+
+/**
+ * i40e_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u16 pba_word = 0;
+ u16 pba_size = 0;
+ u16 pba_ptr = 0;
+ u16 i = 0;
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+ if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
+ DEBUGOUT("Failed to read PBA flags or flag is invalid.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT("Failed to read PBA Block pointer.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT("Failed to read PBA Block size.\n");
+ return status;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+		DEBUGOUT("Buffer too small for PBA data.\n");
+ return I40E_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT1("Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
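+
+/* Worked example (illustrative) of the size check above: a PBA block with
+ * pba_size = 5 data words expands to 5 * 2 = 10 ASCII characters plus a
+ * terminating '\0', so pba_num must be at least 11 bytes.
+ */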
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ case I40E_PHY_TYPE_10GBASE_AOC:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
+ }
+
+ return media;
+}
+
+#define I40E_PF_RESET_WAIT_COUNT 200
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * ensure the global reset is complete and then reset the PF
+ **/
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 cnt = 0;
+ u32 cnt1 = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+	 * The grst delay value is in 100ms units; we poll for well past
+	 * that delay to be sure we don't just miss the end.
+ */
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+
+ grst_del = grst_del * 20;
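+	/* e.g. a GRSTDEL value of 3 (300 ms) yields 3 * 20 = 60 polls of
+	 * 100 ms each below, i.e. up to 6 seconds of waiting.
+	 */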
+
+ for (cnt = 0; cnt < grst_del; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ i40e_msec_delay(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Now Wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ DEBUGOUT1("Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ i40e_msec_delay(10);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+		DEBUGOUT("Wait for FW Reset to complete timed out.\n");
+ DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!cnt) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ i40e_msec_delay(1);
+ }
+ if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ DEBUGOUT("PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+ u32 num_queues, base_queue;
+ u32 num_pf_int;
+ u32 num_vf_int;
+ u32 num_vfs;
+ u32 i, j;
+ u32 val;
+ u32 eol = 0x7ff;
+
+ /* get number of interrupts, queues, and vfs */
+ val = rd32(hw, I40E_GLPCI_CNF2);
+ num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+ num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+ val = rd32(hw, I40E_PFLAN_QALLOC);
+ base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+ I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+ j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+ I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+ num_queues = (j - base_queue) + 1;
+ else
+ num_queues = 0;
+
+ val = rd32(hw, I40E_PF_VT_PFALLOC);
+ i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+ j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+ num_vfs = (j - i) + 1;
+ else
+ num_vfs = 0;
+
+ /* stop all the interrupts */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+ /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+ val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ for (i = 0; i < num_vfs; i++)
+ wr32(hw, I40E_VPINT_LNKLST0(i), val);
+ for (i = 0; i < num_vf_int - 2; i++)
+ wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+ /* warn the HW of the coming Tx disables */
+ for (i = 0; i < num_queues; i++) {
+ u32 abs_queue_idx = base_queue + i;
+ u32 reg_block = 0;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+ val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+ }
+ i40e_usec_delay(400);
+
+ /* stop all the queues */
+ for (i = 0; i < num_queues; i++) {
+ wr32(hw, I40E_QINT_TQCTL(i), 0);
+ wr32(hw, I40E_QTX_ENA(i), 0);
+ wr32(hw, I40E_QINT_RQCTL(i), 0);
+ wr32(hw, I40E_QRX_ENA(i), 0);
+ }
+
+ /* short wait for all queue disables to settle */
+ i40e_usec_delay(50);
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+ * if it is not our port then ignore
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
+}
+
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
+#define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 current_mode = 0;
+ u32 mode = 0;
+ int i;
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * If this function is used to turn on the blink, it should also be
+ * used to disable the blink when restoring the original state.
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+{
+ u32 current_mode = 0;
+ int i;
+
+ if (mode & 0xfffffff0)
+ DEBUGOUT1("invalid mode passed in %X\n", mode);
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = false;
+
+ if (blink)
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ else
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
+ }
+}
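+
+/* Usage sketch: identify a port by blinking its LED for a few seconds
+ * and then restoring the original mode. A minimal illustration only;
+ * the helper name is not part of the driver.
+ */
+static void i40e_example_blink_led(struct i40e_hw *hw)
+{
+	u32 orig_mode = i40e_led_get(hw);
+
+	i40e_led_set(hw, 0xf, true);		/* on, blinking */
+	i40e_msec_delay(3000);
+	i40e_led_set(hw, orig_mode, false);	/* restore, blink off */
+}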
+
+/* Admin command wrappers */
+
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @abilities: structure for PHY capabilities to be filled
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+ u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+ if (!abilities)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ if (qualified_modules)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+ if (report_init)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+ status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
+ cmd_details);
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
+ status = I40E_ERR_UNKNOWN_PHY;
+
+ if (report_init)
+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters supported on the Port.
+ * One or more of the Set PHY config parameters may be ignored in MFP
+ * mode, as the PF may not have the privilege to set some of the PHY
+ * config parameters. This status will be indicated by the command
+ * response.
+ **/
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_phy_config *cmd =
+ (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (!config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_config);
+
+ *cmd = *config;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to automatically restart the link
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code status;
+ u8 pause_mask = 0x0;
+
+ *aq_failures = 0x0;
+
+ switch (fc_mode) {
+ case I40E_FC_FULL:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_RX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_TX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ break;
+ default:
+ break;
+ }
+
+ /* Get the current phy config */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+ return status;
+ }
+
+ memset(&config, 0, sizeof(config));
+ /* clear the old pause settings */
+ config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities != abilities.abilities) {
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities.phy_type;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+ }
+ /* Update the link info */
+ status = i40e_update_link_info(hw);
+ if (status) {
+ /* Wait a little bit (on 40G cards it sometimes takes a really
+ * long time for link to come back from the atomic reset)
+ * and try once more
+ */
+ i40e_msec_delay(1000);
+ status = i40e_update_link_info(hw);
+ }
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+ return status;
+}
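+
+/* Usage sketch: request full flow control and decode which AQ step
+ * failed via the I40E_SET_FC_AQ_FAIL_* bits set above. The helper
+ * name is illustrative only.
+ */
+static enum i40e_status_code i40e_example_enable_fc(struct i40e_hw *hw)
+{
+	enum i40e_status_code status;
+	u8 aq_failures = 0;
+
+	hw->fc.requested_mode = I40E_FC_FULL;
+	status = i40e_set_fc(hw, &aq_failures, true);
+	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET)
+		DEBUGOUT("get_phy_capabilities failed\n");
+	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
+		DEBUGOUT("set_phy_config failed\n");
+	return status;
+}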
+
+/**
+ * i40e_aq_set_mac_config
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @crc_en: Tell HW to append a CRC to outgoing frames
+ * @pacing: Pacing configurations
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Configure MAC settings for frame size, jumbo frame support and the
+ * addition of a CRC by the hardware.
+ **/
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size,
+ bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_mac_config *cmd =
+ (struct i40e_aq_set_mac_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (max_frame_size == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+ cmd->params = ((u8)pacing & 0x0F) << 3;
+ if (crc_en)
+ cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @enable_link: if true: enable link, if false: disable link
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+ if (enable_link)
+ cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+ else
+ cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ enum i40e_status_code status;
+ bool tx_pause, rx_pause;
+ u16 command_flags;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
+ sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ /* update fc info */
+ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+	if (tx_pause && rx_pause)
+ hw->fc.current_mode = I40E_FC_FULL;
+ else if (tx_pause)
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ hw->fc.current_mode = I40E_FC_NONE;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
+
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ hw_link_info->lse_enable = true;
+ else
+ hw_link_info->lse_enable = false;
+
+ if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
+ /* save link status information */
+ if (link)
+ i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_int_mask *cmd =
+ (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_int_mask);
+
+ cmd->event_mask = CPU_TO_LE16(mask);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_local_advt_reg);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_local_advt_reg_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_local_advt_reg_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *cmd =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_set_local_advt_reg);
+
+ cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg));
+ cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_partner_advt
+ * @hw: pointer to the hw struct
+ * @advt_reg: AN partner advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the link partner AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_partner_advt);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_partner_advt_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_partner_advt_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_lb_modes
+ * @hw: pointer to the hw struct
+ * @lb_modes: loopback mode to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets loopback modes.
+ **/
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
+ u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_lb_mode *cmd =
+ (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_lb_modes);
+
+ cmd->lb_mode = CPU_TO_LE16(lb_modes);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set PHY debug command flags, e.g. to request an external PHY reset.
+ **/
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_debug *cmd =
+ (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_debug);
+
+ cmd->command_flags = cmd_flags;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
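+
+/* Usage sketch: create a PF-owned data-port VSI under an uplink switch
+ * element. I40E_AQ_VSI_TYPE_PF and I40E_AQ_VSI_PROP_SWITCH_VALID come
+ * from i40e_adminq_cmd.h; connection_type 0x1 is the regular data-port
+ * value used by the PMD. The helper name is illustrative only.
+ */
+static enum i40e_status_code i40e_example_add_vsi(struct i40e_hw *hw,
+				u16 uplink_seid, u16 *new_seid)
+{
+	struct i40e_vsi_context ctx;
+	enum i40e_status_code status;
+
+	memset(&ctx, 0, sizeof(ctx));
+	ctx.uplink_seid = uplink_seid;
+	ctx.connection_type = 0x1;	/* regular data port */
+	ctx.flags = I40E_AQ_VSI_TYPE_PF;
+	ctx.info.valid_sections = CPU_TO_LE16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+	status = i40e_aq_add_vsi(hw, &ctx, NULL);
+	if (!status && new_seid)
+		*new_seid = ctx.seid;	/* SEID returned by the FW */
+	return status;
+}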
+
+/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
+ **/
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set) {
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
+ flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+ }
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1))
+ cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
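+
+/* Usage sketch: toggle full (unicast + multicast + broadcast)
+ * promiscuous mode on a VSI using the three wrappers above. The
+ * helper name is illustrative only.
+ */
+static enum i40e_status_code i40e_example_set_promisc(struct i40e_hw *hw,
+				u16 seid, bool on)
+{
+	enum i40e_status_code status;
+
+	status = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, on,
+						     NULL, true);
+	if (status)
+		return status;
+	status = i40e_aq_set_vsi_multicast_promiscuous(hw, seid, on, NULL);
+	if (status)
+		return status;
+	return i40e_aq_set_vsi_broadcast(hw, seid, on, NULL);
+}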
+
+/**
+ * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set VLAN promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ UNREFERENCED_1PARAMETER(cmd_details);
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), NULL);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+ return status;
+}
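+
+/* Usage sketch: the read-modify-write pattern for VSI properties. The
+ * FW only applies sections whose I40E_AQ_VSI_PROP_*_VALID bit is set
+ * in info.valid_sections. The helper name is illustrative only.
+ */
+static enum i40e_status_code i40e_example_rmw_vsi(struct i40e_hw *hw,
+				u16 vsi_seid)
+{
+	struct i40e_vsi_context ctx;
+	enum i40e_status_code status;
+
+	memset(&ctx, 0, sizeof(ctx));
+	ctx.seid = vsi_seid;
+	status = i40e_aq_get_vsi_params(hw, &ctx, NULL);
+	if (status)
+		return status;
+	/* modify ctx.info fields here and set the matching
+	 * valid_sections bits before writing the context back
+	 */
+	return i40e_aq_update_vsi_params(hw, &ctx, NULL);
+}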
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of the buffer pointed to by @buf
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = CPU_TO_LE16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = LE16_TO_CPU(scfg->seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags, u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_switch_config *scfg =
+ (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_switch_config);
+ scfg->flags = CPU_TO_LE16(flags);
+ scfg->valid_flags = CPU_TO_LE16(valid_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ if (fw_major_version != NULL)
+ *fw_major_version = LE16_TO_CPU(resp->fw_major);
+ if (fw_minor_version != NULL)
+ *fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+ if (fw_build != NULL)
+ *fw_build = LE32_TO_CPU(resp->fw_build);
+ if (api_major_version != NULL)
+ *api_major_version = LE16_TO_CPU(resp->api_major);
+ if (api_minor_version != NULL)
+ *api_minor_version = LE16_TO_CPU(resp->api_minor);
+
+ /* A workaround to fix the API version in SW */
+ if (api_major_version && api_minor_version &&
+ fw_major_version && fw_minor_version &&
+ ((*api_major_version == 1) && (*api_minor_version == 1)) &&
+ (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
+ (*fw_major_version > 4)))
+ *api_minor_version = 2;
+ }
+
+ return status;
+}
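+
+/* Usage sketch: the AQ API version gates features such as the
+ * PROMISC_TX flag used earlier; this wraps the inline
+ * hw->aq.api_maj_ver checks as a standalone query. The helper name is
+ * illustrative only.
+ */
+static bool i40e_example_api_ge_1_5(struct i40e_hw *hw)
+{
+	u16 api_maj = 0, api_min = 0;
+
+	if (i40e_aq_get_firmware_version(hw, NULL, NULL, NULL,
+					 &api_maj, &api_min, NULL))
+		return false;
+	return (api_maj > 1) || (api_maj == 1 && api_min >= 5);
+}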
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 len;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Sets link_up to true if the link is up and to false if it is down.
+ * The value of link_up is only valid when the returned status is I40E_SUCCESS.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_update_link_info(hw);
+
+ if (status != I40E_SUCCESS)
+ i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+ status);
+ }
+
+ *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+ return status;
+}
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ if (status)
+ return status;
+
+ if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ status = i40e_aq_get_phy_capabilities(hw, false, false,
+ &abilities, NULL);
+ if (status)
+ return status;
+
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+ }
+ return status;
+}
+
+/**
+ * i40e_get_link_speed
+ * @hw: pointer to the hw struct
+ *
+ * Returns the link speed of the adapter.
+ **/
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
+{
+ enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+ if (status != I40E_SUCCESS)
+ goto i40e_link_speed_exit;
+ }
+
+ speed = hw->phy.link_info.link_speed;
+
+i40e_link_speed_exit:
+ return speed;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @enable_stats: true to turn on VEB stats
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 veb_flags = 0;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+ cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ /* reverse logic here: set the bitflag to disable the stats */
+ if (!enable_stats)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
+
+ cmd->veb_flags = CPU_TO_LE16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = LE16_TO_CPU(resp->veb_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * uplink_seid, and returns them to the caller.
+ **/
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = CPU_TO_LE16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = LE16_TO_CPU(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = true;
+ else
+ *floating = false;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+ int i;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ for (i = 0; i < count; i++)
+ if (I40E_IS_MULTICAST(mv_list[i].mac_addr))
+ mv_list[i].flags |=
+ CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
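+
+/* Usage sketch: add a single perfect-match MAC filter (matching any
+ * VLAN) to a VSI. The I40E_AQC_MACVLAN_ADD_* flags come from
+ * i40e_adminq_cmd.h; the helper name is illustrative only.
+ */
+static enum i40e_status_code i40e_example_add_mac(struct i40e_hw *hw,
+				u16 vsi_seid, u8 *mac)
+{
+	struct i40e_aqc_add_macvlan_element_data elem;
+
+	memset(&elem, 0, sizeof(elem));
+	i40e_memcpy(elem.mac_addr, mac, sizeof(elem.mac_addr),
+		    I40E_NONDMA_TO_NONDMA);
+	elem.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+				 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+	return i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
+}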
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
+ * @hw: pointer to the hw struct
+ * @opcode: AQ opcode for add or delete mirror rule
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @id: Destination VSI SEID or Rule ID
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
+ * VEBs/VEPA elements only
+ **/
+static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw,
+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+ u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule *cmd =
+ (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
+ struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ buf_size = count * sizeof(*mr_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+ cmd->seid = CPU_TO_LE16(sw_seid);
+ cmd->rule_type = CPU_TO_LE16(rule_type &
+ I40E_AQC_MIRROR_RULE_TYPE_MASK);
+ cmd->num_entries = CPU_TO_LE16(count);
+ /* Dest VSI for add, rule_id for delete */
+ cmd->destination = CPU_TO_LE16(id);
+ if (mr_list) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
+ cmd_details);
+ if (status == I40E_SUCCESS ||
+ hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
+ if (rule_id)
+ *rule_id = LE16_TO_CPU(resp->rule_id);
+ if (rules_used)
+ *rules_used = LE16_TO_CPU(resp->mirror_rules_used);
+ if (rules_free)
+ *rules_free = LE16_TO_CPU(resp->mirror_rules_free);
+ }
+ return status;
+}
+
+/**
+ * i40e_aq_add_mirrorrule - add a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @dest_vsi: SEID of VSI to which packets will be mirrored
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
+ **/
+enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+ if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
+ rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
+ if (count == 0 || !mr_list)
+ return I40E_ERR_PARAM;
+ }
+
+ return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
+ rule_type, dest_vsi, count, mr_list,
+ cmd_details, rule_id, rules_used, rules_free);
+}
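+
+/* Usage sketch: mirror the ingress traffic of one VSI to another.
+ * I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS comes from
+ * i40e_adminq_cmd.h; the helper name is illustrative only.
+ */
+static enum i40e_status_code i40e_example_mirror_vsi(struct i40e_hw *hw,
+				u16 sw_seid, u16 src_vsi_seid,
+				u16 dst_vsi_seid, u16 *rule_id)
+{
+	__le16 entry = CPU_TO_LE16(src_vsi_seid);
+
+	return i40e_aq_add_mirrorrule(hw, sw_seid,
+				      I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS,
+				      dst_vsi_seid, 1, &entry, NULL,
+				      rule_id, NULL, NULL);
+}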
+
+/**
+ * i40e_aq_delete_mirrorrule - delete a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @count: length of the list
+ * @rule_id: Rule ID that is returned in the receive desc as part of
+ * add_mirrorrule.
+ * @mr_list: list of mirrored VLAN IDs to be removed
+ * @cmd_details: pointer to command details structure or NULL
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
+ **/
+enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free)
+{
+	/* Rule ID must be valid except for rule_type INGRESS VLAN mirroring */
+	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+		/* count and mr_list must be valid for rule_type INGRESS VLAN
+		 * mirroring; for other rule types they are not used
+		 */
+ if (count == 0 || !mr_list)
+ return I40E_ERR_PARAM;
+ }
+
+ return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
+ rule_type, rule_id, count, mr_list,
+ cmd_details, NULL, rules_used, rules_free);
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*v_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*v_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * send msg to vf
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = CPU_TO_LE32(vfid);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
+ (u64)LE32_TO_CPU(cmd_resp->value_low);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
+ cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_request_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = CPU_TO_LE16(resource);
+ cmd_resp->access_type = CPU_TO_LE16(access);
+ cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_release_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = CPU_TO_LE16(resource);
+ cmd->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
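+
+/* Usage sketch: bracket NVM access with the request/release pair.
+ * I40E_NVM_RESOURCE_ID and I40E_RESOURCE_READ come from i40e_type.h;
+ * the helper name is illustrative only.
+ */
+static enum i40e_status_code i40e_example_with_nvm_lock(struct i40e_hw *hw)
+{
+	enum i40e_status_code status;
+	u64 timeout = 0;
+
+	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+					  I40E_RESOURCE_READ, 0,
+					  &timeout, NULL);
+	if (status)
+		return status;	/* on EBUSY, timeout is the wait hint */
+	/* ... NVM reads (see i40e_aq_read_nvm below) go here ... */
+	return i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}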
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_read_nvm");
+
+	/* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
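+
+/* Usage sketch: read an NVM region in I40E_AQ_LARGE_BUF-sized chunks,
+ * flagging only the final command of the series. module_pointer 0
+ * selects flat addressing; the helper name is illustrative only.
+ */
+static enum i40e_status_code i40e_example_read_nvm(struct i40e_hw *hw,
+				u32 offset, u16 len, u8 *buf)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+	u16 done = 0;
+
+	while (done < len) {
+		u16 chunk = len - done;
+
+		if (chunk > I40E_AQ_LARGE_BUF)
+			chunk = I40E_AQ_LARGE_BUF;
+		status = i40e_aq_read_nvm(hw, 0, offset + done, chunk,
+					  buf + done,
+					  (u16)(done + chunk) == len, NULL);
+		if (status)
+			break;
+		done += chunk;
+	}
+	return status;
+}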
+
+/**
+ * i40e_aq_read_nvm_config - read an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature id
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: pointer to count of elements read by FW
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, u32 field_id, void *data,
+ u16 buf_size, u16 *element_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_config_read *cmd =
+ (struct i40e_aqc_nvm_config_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+ cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
+ if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
+ cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
+ else
+ cmd->element_id_msw = 0;
+
+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+ if (!status && element_count)
+ *element_count = LE16_TO_CPU(cmd->element_count);
+
+ return status;
+}
+
+/**
+ * i40e_aq_write_nvm_config - write an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer holding the elements to be written
+ * @buf_size: buffer size
+ * @element_count: count of elements to be written
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, void *data, u16 buf_size,
+ u16 element_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_config_write *cmd =
+ (struct i40e_aqc_nvm_config_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->element_count = CPU_TO_LE16(element_count);
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @buff: buffer for the OEM post update data (currently unused)
+ * @buff_size: size of the buffer (currently unused)
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ UNREFERENCED_2PARAMETER(buff, buff_size);
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+ status = I40E_ERR_NOT_IMPLEMENTED;
+
+ return status;
+}
+
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_erase_nvm");
+
+ /* The highest byte of the offset must be zero (24-bit offset). */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_erase_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+ return status;
+}
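+
+/*
+ * Worked example (illustrative): unlike the read/update commands, offset
+ * and length here are in 4 KB sector units, so erasing 64 KB located 64 KB
+ * into the module is i40e_aq_erase_nvm(hw, module, 16, 16, true, NULL),
+ * since 16 sectors * 4 KB = 64 KB for both values.
+ */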
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 valid_functions, num_functions;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u8 major_rev;
+ u32 i = 0;
+ u16 id;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = LE16_TO_CPU(cap->id);
+ number = LE32_TO_CPU(cap->number);
+ logical_id = LE32_TO_CPU(cap->logical_id);
+ phys_id = LE32_TO_CPU(cap->phys_id);
+ major_rev = cap->major_rev;
+
+ switch (id) {
+ case I40E_AQ_CAP_ID_SWITCH_MODE:
+ p->switch_mode = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Switch mode = %d\n",
+ p->switch_mode);
+ break;
+ case I40E_AQ_CAP_ID_MNG_MODE:
+ p->management_mode = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Management Mode = %d\n",
+ p->management_mode);
+ break;
+ case I40E_AQ_CAP_ID_NPAR_ACTIVE:
+ p->npar_enable = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: NPAR enable = %d\n",
+ p->npar_enable);
+ break;
+ case I40E_AQ_CAP_ID_OS2BMC_CAP:
+ p->os2bmc = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: OS2BMC = %d\n", p->os2bmc);
+ break;
+ case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
+ p->valid_functions = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Valid Functions = %d\n",
+ p->valid_functions);
+ break;
+ case I40E_AQ_CAP_ID_SRIOV:
+ if (number == 1)
+ p->sr_iov_1_1 = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: SR-IOV = %d\n",
+ p->sr_iov_1_1);
+ break;
+ case I40E_AQ_CAP_ID_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VF count = %d\n",
+ p->num_vfs);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VF base_id = %d\n",
+ p->vf_base_id);
+ break;
+ case I40E_AQ_CAP_ID_VMDQ:
+ if (number == 1)
+ p->vmdq = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VMDQ = %d\n", p->vmdq);
+ break;
+ case I40E_AQ_CAP_ID_8021QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: 802.1Qbg = %d\n", number);
+ break;
+ case I40E_AQ_CAP_ID_8021QBR:
+ if (number == 1)
+ p->evb_802_1_qbh = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: 802.1Qbh = %d\n", number);
+ break;
+ case I40E_AQ_CAP_ID_VSI:
+ p->num_vsis = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VSI count = %d\n",
+ p->num_vsis);
+ break;
+ case I40E_AQ_CAP_ID_DCB:
+ if (number == 1) {
+ p->dcb = true;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: DCB = %d\n", p->dcb);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: TC Mapping = %d\n",
+ logical_id);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: TC Max = %d\n", p->maxtc);
+ break;
+ case I40E_AQ_CAP_ID_FCOE:
+ if (number == 1)
+ p->fcoe = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: FCOE = %d\n", p->fcoe);
+ break;
+ case I40E_AQ_CAP_ID_ISCSI:
+ if (number == 1)
+ p->iscsi = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: iSCSI = %d\n", p->iscsi);
+ break;
+ case I40E_AQ_CAP_ID_RSS:
+ p->rss = true;
+ p->rss_table_size = number;
+ p->rss_table_entry_width = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS = %d\n", p->rss);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS table size = %d\n",
+ p->rss_table_size);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS table width = %d\n",
+ p->rss_table_entry_width);
+ break;
+ case I40E_AQ_CAP_ID_RXQ:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Rx QP = %d\n", number);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: base_queue = %d\n",
+ p->base_queue);
+ break;
+ case I40E_AQ_CAP_ID_TXQ:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Tx QP = %d\n", number);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: base_queue = %d\n",
+ p->base_queue);
+ break;
+ case I40E_AQ_CAP_ID_MSIX:
+ p->num_msix_vectors = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX vector count = %d\n",
+ p->num_msix_vectors);
+ break;
+ case I40E_AQ_CAP_ID_VF_MSIX:
+ p->num_msix_vectors_vf = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX VF vector count = %d\n",
+ p->num_msix_vectors_vf);
+ break;
+ case I40E_AQ_CAP_ID_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = true;
+ p->flex10_capable = true;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = true;
+ if (number & 2)
+ p->flex10_capable = true;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flex10 mode = %d\n",
+ p->flex10_mode);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flex10 status = %d\n",
+ p->flex10_status);
+ break;
+ case I40E_AQ_CAP_ID_CEM:
+ if (number == 1)
+ p->mgmt_cem = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: CEM = %d\n", p->mgmt_cem);
+ break;
+ case I40E_AQ_CAP_ID_IWARP:
+ if (number == 1)
+ p->iwarp = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: iWARP = %d\n", p->iwarp);
+ break;
+ case I40E_AQ_CAP_ID_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: LED - PIN %d\n", phys_id);
+ break;
+ case I40E_AQ_CAP_ID_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: SDP - PIN %d\n", phys_id);
+ break;
+ case I40E_AQ_CAP_ID_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MDIO port number = %d\n",
+ p->mdio_port_num);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MDIO port mode = %d\n",
+ p->mdio_port_mode);
+ break;
+ case I40E_AQ_CAP_ID_1588:
+ if (number == 1)
+ p->ieee_1588 = true;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: IEEE 1588 = %d\n",
+ p->ieee_1588);
+ break;
+ case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
+ p->fd = true;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flow Director = 1\n");
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Guaranteed FD filters = %d\n",
+ p->fd_filters_guaranteed);
+ break;
+ case I40E_AQ_CAP_ID_WSR_PROT:
+ p->wr_csr_prot = (u64)number;
+ p->wr_csr_prot |= (u64)logical_id << 32;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: wr_csr_prot = 0x%llX\n\n",
+ (p->wr_csr_prot & 0xffff));
+ break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = true;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = true;
+ break;
+#ifdef X722_SUPPORT
+ case I40E_AQ_CAP_ID_WOL_AND_PROXY:
+ hw->num_wol_proxy_filters = (u16)number;
+ hw->wol_proxy_vsi_seid = (u16)logical_id;
+ p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
+ if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
+ else
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
+ p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: WOL proxy filters = %d\n",
+ hw->num_wol_proxy_filters);
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+
+ if (p->fcoe)
+ i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
+#ifdef I40E_FCOE_ENA
+ /* Software override ensuring FCoE is disabled if npar or mfp
+ * mode is enabled, because FCoE is not supported in these modes.
+ */
+ if (p->npar_enable || p->flex10_enable)
+ p->fcoe = false;
+#else
+ /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
+ p->fcoe = false;
+#endif
+
+ /* count the enabled ports (aka the "not disabled" ports) */
+ hw->num_ports = 0;
+ for (i = 0; i < 4; i++) {
+ u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+ u64 port_cfg = 0;
+
+ /* use AQ read to get the physical register offset instead
+ * of the port relative offset
+ */
+ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+ if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+ hw->num_ports++;
+ }
+
+ valid_functions = p->valid_functions;
+ num_functions = 0;
+ while (valid_functions) {
+ if (valid_functions & 1)
+ num_functions++;
+ valid_functions >>= 1;
+ }
+
+ /* partition id is 1-based, and functions are evenly spread
+ * across the ports as partitions
+ */
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
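+
+/*
+ * Worked example for the partition math above (illustrative): with
+ * num_ports = 4 and 8 valid functions, num_partitions = 8 / 4 = 2, and
+ * the PF with pf_id = 5 gets partition_id = (5 / 4) + 1 = 2.
+ */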
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = LE16_TO_CPU(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
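+
+/*
+ * Typical caller pattern (a sketch; my_alloc()/my_free() are hypothetical
+ * allocator names): on I40E_AQ_RC_ENOMEM the needed size is reported back
+ * through *data_size, so the caller can reallocate and retry.
+ *
+ *     void *buff;
+ *     u16 need = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ *     do {
+ *             buff = my_alloc(need);
+ *             status = i40e_aq_discover_capabilities(hw, buff, need, &need,
+ *                             i40e_aqc_opc_list_func_capabilities, NULL);
+ *             if (hw->aq.asq_last_status != I40E_AQ_RC_ENOMEM)
+ *                     break;
+ *             my_free(buff);
+ *     } while (1);
+ */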
+
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_update_nvm");
+
+ /* The highest byte of the offset must be zero (24-bit offset). */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_update_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+ return status;
+}
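+
+/*
+ * Usage sketch (illustrative): an NVM write is normally done while owning
+ * the NVM resource, often after erasing the affected 4 KB sectors;
+ * `module', `sector', `nsectors', `byte_off', `len' and `data' are
+ * hypothetical caller-side values.
+ *
+ *     u64 timeout = 0;
+ *     if (!i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+ *                                   I40E_RESOURCE_WRITE, 0,
+ *                                   &timeout, NULL)) {
+ *             i40e_aq_erase_nvm(hw, module, sector, nsectors, false, NULL);
+ *             i40e_aq_update_nvm(hw, module, byte_off, len, data, true,
+ *                                NULL);
+ *             i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ *     }
+ */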
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = LE16_TO_CPU(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = LE16_TO_CPU(resp->remote_len);
+ }
+
+ return status;
+}
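+
+/*
+ * Usage sketch (illustrative): fetching the local MIB from the nearest
+ * bridge; `buff' is a hypothetical caller-side array.
+ *
+ *     u16 local_len = 0, remote_len = 0;
+ *     status = i40e_aq_get_lldp_mib(hw,
+ *                     I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ *                     I40E_AQ_LLDP_MIB_LOCAL, buff, sizeof(buff),
+ *                     &local_len, &remote_len, NULL);
+ */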
+
+/**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_set_local_mib *cmd =
+ (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_set_local_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = mib_type;
+ cmd->length = CPU_TO_LE16(buff_size);
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buff));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to add
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be added
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add the specified TLV to the LLDP Local MIB for the given bridge type.
+ * It is the responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return firmware will write the complete LLDP MIB with the newly
+ * added TLV in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || tlv_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->len = CPU_TO_LE16(tlv_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to update
+ * @buff_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buff
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV.
+ **/
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_tlv *cmd =
+ (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || offset == 0 ||
+ old_len == 0 || new_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->old_len = CPU_TO_LE16(old_len);
+ cmd->new_offset = CPU_TO_LE16(offset);
+ cmd->new_len = CPU_TO_LE16(new_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: pointer to a user supplied buffer that has the TLV
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be deleted
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+ cmd->len = CPU_TO_LE16(tlv_len);
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
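+
+/*
+ * Usage note (illustrative): drivers that handle DCBX in software commonly
+ * shut the firmware agent down first and restart it on teardown:
+ *
+ *     i40e_aq_stop_lldp(hw, true, NULL);
+ *     ... software LLDP/DCBX handling ...
+ *     i40e_aq_start_lldp(hw, NULL);
+ */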
+
+/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the hw struct
+ * @start_agent: True if DCBx Agent needs to be Started
+ * False if DCBx Agent needs to be Stopped
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded dcbx Agent
+ **/
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ bool start_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
+ (struct i40e_aqc_lldp_stop_start_specific_agent *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_stop_start_spec_agent);
+
+ if (start_agent)
+ cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = CPU_TO_LE16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && filter_index)
+ *filter_index = resp->index;
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_resource_alloc (0x0204)
+ * @hw: pointer to the hw struct
+ * @num_entries: pointer to u8 to store the number of resource entries returned
+ * @buf: pointer to a user supplied buffer. This buffer must be large enough
+ * to store the resource information for all resource types. Each
+ * resource type is an i40e_aqc_switch_resource_alloc_element_resp
+ * structure.
+ * @count: number of i40e_aqc_switch_resource_alloc_element_resp entries
+ * the buffer can hold
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the resources allocated to a function.
+ **/
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
+ (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = count * sizeof(*buf);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_resource_alloc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status && num_entries)
+ *num_entries = cmd_resp->num_entries;
+
+ return status;
+}
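+
+/*
+ * Usage sketch (illustrative): the buffer is sized in whole response
+ * elements, matching the count-to-bytes conversion above.
+ *
+ *     struct i40e_aqc_switch_resource_alloc_element_resp buf[32];
+ *     u8 num = 0;
+ *     status = i40e_aq_get_switch_resource_alloc(hw, &num, buf, 32, NULL);
+ */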
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * @hw: pointer to the hw struct
+ * @flags: component flags
+ * @mac_seid: uplink seid (MAC SEID)
+ * @vsi_seid: connected vsi seid
+ * @ret_seid: seid of the created PV component
+ *
+ * This instantiates an i40e port virtualizer with specified flags.
+ * Depending on specified flags the port virtualizer can act as a
+ * 802.1Qbr port virtualizer or a 802.1Qbg S-component.
+ **/
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_update_pv *cmd =
+ (struct i40e_aqc_add_update_pv *)&desc.params.raw;
+ struct i40e_aqc_add_update_pv_completion *resp =
+ (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
+ cmd->command_flags = CPU_TO_LE16(flags);
+ cmd->uplink_seid = CPU_TO_LE16(mac_seid);
+ cmd->connected_seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status && ret_seid)
+ *ret_seid = LE16_TO_CPU(resp->pv_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_tag - Add an S/E-tag
+ * @hw: pointer to the hw struct
+ * @direct_to_queue: should s-tag direct flow to a specific queue
+ * @vsi_seid: VSI SEID to use this tag
+ * @tag: value of the tag
+ * @queue_num: queue number, only valid if direct_to_queue is true
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates an S- or E-tag to a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_tag *cmd =
+ (struct i40e_aqc_add_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+ if (direct_to_queue) {
+ cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
+ cmd->queue_number = CPU_TO_LE16(queue_num);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_tag - Remove an S- or E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID this tag is associated with
+ * @tag: value of the S-tag to delete
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an S- or E-tag from a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_tag *cmd =
+ (struct i40e_aqc_remove_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_mcast_etag - Add a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: SEID of the Port Virtualizer to associate the E-tag with
+ * @etag: value of E-tag to add
+ * @num_tags_in_buf: number of unicast E-tags in indirect buffer
+ * @buf: address of indirect buffer
+ * @tags_used: return value, number of E-tags in use by this port
+ * @tags_free: return value, number of unallocated E-tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates a multicast E-tag to a port virtualizer. It will return
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ *
+ * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
+ * num_tags_in_buf long.
+ **/
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = sizeof(u16) * num_tags_in_buf;
+
+ if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+ cmd->num_unicast_etags = num_tags_in_buf;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID this E-tag is associated with
+ * @etag: value of the E-tag to remove
+ * @tags_used: return value, number of tags in use by this port
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an E-tag from the port virtualizer. It will return
+ * the number of tags allocated by the port, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (pv_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_tag - Update an S/E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID using this S-tag
+ * @old_tag: old tag value
+ * @new_tag: new tag value
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This updates the value of the tag currently attached to this VSI
+ * in the switch complex. It will return the number of tags allocated
+ * by the PF, and the number of unallocated tags available.
+ **/
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_update_tag *cmd =
+ (struct i40e_aqc_update_tag *)&desc.params.raw;
+ struct i40e_aqc_update_tag_completion *resp =
+ (struct i40e_aqc_update_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->old_tag = CPU_TO_LE16(old_tag);
+ cmd->new_tag = CPU_TO_LE16(new_tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the hw struct
+ * @tcmap: TC map for which to request/release the ignore-PFC condition
+ * @request: request or release ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This sends out request/release to ignore PFC condition for a TC.
+ * It will return the TCs for which PFC is currently ignored.
+ **/
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
+ bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pfc_ignore *cmd_resp =
+ (struct i40e_aqc_pfc_ignore *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
+
+ if (request)
+ cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
+
+ cmd_resp->tc_bitmap = tcmap;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tcmap_ret != NULL)
+ *tcmap_ret = cmd_resp->tc_bitmap;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * When LLDP is handled in PF this command is used by the PF
+ * to notify EMP that a DCB setting is modified.
+ * When LLDP is handled in EMP this command is used by the PF
+ * to notify EMP whenever one of the following parameters get
+ * modified:
+ * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ * - PCIRTT in PRTDCB_GENC.PCIRTT
+ * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * allocate a set of smonVlanStats counters to a specific VLAN in a
+ * specific switch.
+ **/
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd_resp =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if ((seid == 0) || (stat_index == NULL))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
+
+ cmd_resp->seid = CPU_TO_LE16(seid);
+ cmd_resp->vlan = CPU_TO_LE16(vlan_id);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stat_index)
+ *stat_index = LE16_TO_CPU(cmd_resp->stat_index);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_statistics - Remove a statistics block from a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * deallocate a set of smonVlanStats counters from a specific VLAN in a
+ * specific switch.
+ **/
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_statistics);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan = CPU_TO_LE16(vlan_id);
+ cmd->stat_index = CPU_TO_LE16(stat_index);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_port_parameters - set physical port parameters.
+ * @hw: pointer to the hw struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set, double VLAN tagging is enabled
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_set_port_parameters *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 command_flags = 0;
+
+ cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_port_parameters);
+
+ cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+ if (save_bad_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS;
+ if (pad_short_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS;
+ if (double_vlan)
+ command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ enum i40e_status_code status;
+ bool cmd_param_flag = false;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = true;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = false;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
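+
+/*
+ * Design note: I40E_AQ_FLAG_RD marks the buffer as carrying command data,
+ * so it is set only for the configure-type opcodes above, while the
+ * query-type opcodes leave the buffer to be filled by the firmware
+ * response. The wrappers below pair each opcode with its buffer layout.
+ */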
+
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: switching component seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_bw: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
+ (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_switching_comp_bw_limit);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_bw = max_bw;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode (enable/modify/disable ETS)
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks that valid filter and context sizes are
+ * passed for FCoE and PE.
+ *
+ * Returns I40E_SUCCESS if the values passed are valid and within
+ * range else returns an error.
+ **/
+STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 pe_cntx_size, pe_filt_size;
+ u32 fcoe_fmax;
+
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ pe_filt_size <<= (u32)settings->pe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ pe_cntx_size <<= (u32)settings->pe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at driver initialization time.
+ **/
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 hash_lut_size = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
+
+ return I40E_SUCCESS;
+}
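+/*
+ * Illustrative usage sketch, not part of the original source: a PF driver
+ * would typically call i40e_set_filter_control() once at init time with a
+ * zeroed settings struct and only the features it needs enabled. The
+ * specific values below are example assumptions, and
+ * i40e_validate_filter_settings() may reject other combinations:
+ *
+ *	struct i40e_filter_control_settings settings;
+ *
+ *	memset(&settings, 0, sizeof(settings));
+ *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+ *	settings.enable_ethtype = true;
+ *	settings.enable_macvlan = true;
+ *	if (i40e_set_filter_control(hw, &settings))
+ *		DEBUGOUT("filter control setup failed\n");
+ */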
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: true to add the control packet filter, false to remove it
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command will add or remove a control packet filter for a control VSI.
+ * On completion it updates the perfect filter counts in the stats member.
+ **/
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = CPU_TO_LE16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+
+ cmd->etype = CPU_TO_LE16(ethtype);
+ cmd->flags = CPU_TO_LE16(flags);
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
+ stats->etype_used = LE16_TO_CPU(resp->etype_used);
+ stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
+ stats->etype_free = LE16_TO_CPU(resp->etype_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add the Ethertype filter to
+ **/
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 seid)
+{
+ u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+ enum i40e_status_code status;
+
+ status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+ seid, 0, true, NULL,
+ NULL);
+ if (status)
+ DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
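+/*
+ * Illustrative sketch (an assumption, no such caller exists in this file):
+ * the filter installed above can later be removed by invoking the same AQ
+ * helper with is_add == false and identical ethertype/flags/seid:
+ *
+ *	i40e_aq_add_rem_control_packet_filter(hw, NULL,
+ *					      I40E_FLOW_CONTROL_ETHTYPE, flag,
+ *					      seid, 0, false, NULL, NULL);
+ */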
+
+/**
+ * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
+ * @filters: list of cloud filters
+ * @filter_count: length of list
+ *
+ * There's an issue in the device where the Geneve VNI layout needs
+ * to be shifted 1 byte over from the VxLAN VNI
+ **/
+STATIC void i40e_fix_up_geneve_vni(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
+ int i;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (le16_to_cpu(f[i].flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = le32_to_cpu(f[i].tenant_id);
+ f[i].tenant_id = cpu_to_le32(ti << 8);
+ }
+ }
+}
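+/*
+ * Worked example of the fix-up above (illustrative value): a Geneve VNI
+ * delivered in the VxLAN layout as tenant_id 0x00123456 becomes
+ * 0x12345600 after the one-byte left shift, matching the layout the
+ * hardware expects.
+ */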
+
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written to 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written to 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+ cmd_resp->data0 = CPU_TO_LE32(reg_val0);
+ cmd_resp->data1 = CPU_TO_LE32(reg_val1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the first register to be modified
+ * @dw_count: number of alternate structure fields to write
+ * @buffer: pointer to the command buffer
+ *
+ * Write 'dw_count' dwords from 'buffer' to alternate structure
+ * starting at 'addr'.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
+ * pointer is not passed, only the register at 'reg_addr0' is read.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val0 == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val0 = LE32_TO_CPU(cmd_resp->data0);
+
+ if (reg_val1 != NULL)
+ *reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+ }
+
+ return status;
+}
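+/*
+ * Illustrative usage sketch (addr0/addr1 are placeholder alternate RAM
+ * offsets): two dwords can be fetched in one AQ round trip, or NULL can be
+ * passed as the second value pointer to read only the first address. See
+ * i40e_read_bw_from_alt_ram() below for a real caller:
+ *
+ *	u32 val0, val1;
+ *
+ *	status = i40e_aq_alternate_read(hw, addr0, &val0, addr1, &val1);
+ */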
+
+/**
+ * i40e_aq_alternate_read_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the alternate structure field
+ * @dw_count: number of alternate structure fields to read
+ * @buffer: pointer to the command buffer
+ *
+ * Read 'dw_count' dwords from alternate structure starting at 'addr' and
+ * place them in 'buffer'. The buffer should be allocated by the caller.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_read_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_clear
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_clear_port);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reset_needed == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_done);
+
+ cmd->cmd_flags = CPU_TO_LE16(bios_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status && reset_needed)
+ *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
+ I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_oem_mode
+ * @hw: pointer to the HW structure.
+ * @oem_mode: the OEM mode to be used
+ *
+ * Sets the device to a specific operating mode. Currently the only supported
+ * mode is no_clp, which causes FW to refrain from using Alternate RAM.
+ *
+ **/
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_set_mode);
+
+ cmd->cmd_flags = CPU_TO_LE16(oem_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & I40E_PCI_LINK_WIDTH) {
+ case I40E_PCI_LINK_WIDTH_1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case I40E_PCI_LINK_WIDTH_2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case I40E_PCI_LINK_WIDTH_4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case I40E_PCI_LINK_WIDTH_8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & I40E_PCI_LINK_SPEED) {
+ case I40E_PCI_LINK_SPEED_2500:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case I40E_PCI_LINK_SPEED_5000:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case I40E_PCI_LINK_SPEED_8000:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
+
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_dump_internals *cmd =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_dump_internals);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cluster_id = cluster_id;
+ cmd->table_id = table_id;
+ cmd->idx = CPU_TO_LE32(start_index);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (ret_buff_size != NULL)
+ *ret_buff_size = LE16_TO_CPU(desc.datalen);
+ if (ret_next_table != NULL)
+ *ret_next_table = resp->table_id;
+ if (ret_next_index != NULL)
+ *ret_next_index = LE32_TO_CPU(resp->idx);
+ }
+
+ return status;
+}
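+/*
+ * Illustrative usage sketch: the ret_next_table/ret_next_index outputs let
+ * a caller resume a dump that did not fit into one buffer. cluster_id,
+ * table_id and buf are example assumptions:
+ *
+ *	u8 next_table;
+ *	u32 next_index;
+ *	u16 ret_len;
+ *
+ *	status = i40e_aq_debug_dump(hw, cluster_id, table_id, 0,
+ *				    sizeof(buf), buf, &ret_len,
+ *				    &next_table, &next_index, NULL);
+ *
+ * On success, the dump can be continued at (next_table, next_index).
+ */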
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is true if min_bw is a valid value
+ * @max_valid: pointer for bool that is true if max_bw is a valid value
+ *
+ * Read bandwidth values from alternate RAM for the given PF
+ **/
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
+{
+ enum i40e_status_code status;
+ u32 max_bw_addr, min_bw_addr;
+
+ /* Calculate the address of the min/max bw registers */
+ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+ min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+ /* Read the bandwidths from alt ram */
+ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+ min_bw_addr, min_bw);
+
+ if (*min_bw & I40E_ALT_BW_VALID_MASK)
+ *min_valid = true;
+ else
+ *min_valid = false;
+
+ if (*max_bw & I40E_ALT_BW_VALID_MASK)
+ *max_valid = true;
+ else
+ *max_valid = false;
+
+ return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 bwd_size = sizeof(*bw_data);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_partition_bw);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ if (bwd_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(bwd_size);
+
+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr,
+ u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_ADDRESS) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_read_end;
+ }
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_READ) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (!status) {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't read register value from external PHY.\n");
+ }
+
+phy_read_end:
+ return status;
+}
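+/*
+ * Illustrative usage sketch: reading an external PHY register over MDIO.
+ * The page/register choice mirrors the LED helpers later in this file;
+ * port_num is an assumed port index:
+ *
+ *	u16 val;
+ *	u8 phy_addr = i40e_get_phy_address(hw, port_num);
+ *
+ *	if (i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ *				   I40E_PHY_LED_PROV_REG_1, phy_addr, &val))
+ *		DEBUGOUT("external PHY read timed out\n");
+ */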
+
+/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr,
+ u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_ADDRESS) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_write_end;
+ }
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_WRITE) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+phy_write_end:
+ return status;
+}
+
+/**
+ * i40e_get_phy_address
+ * @hw: pointer to the HW structure
+ * @dev_num: number of the PHY device whose address we want
+ * @phy_addr: Returned PHY address
+ *
+ * Gets PHY address for current port
+ **/
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
+{
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
+
+ return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
+}
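+/*
+ * Layout note, derived from the shift above: I40E_GLGEN_MDIO_I2C_SEL packs
+ * one 5-bit PHY address per device starting at bit 5, so dev_num 0 occupies
+ * bits 9:5 and dev_num 1 occupies bits 14:10.
+ */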
+
+/**
+ * i40e_blink_phy_link_led
+ * @hw: pointer to the HW structure
+ * @time: how long the LED should blink, in seconds
+ * @interval: gap between LED on and off, in milliseconds
+ *
+ * Blinks PHY link LED
+ **/
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 i;
+ u16 led_ctl = 0;
+ u16 gpio_led_port;
+ u16 led_reg;
+ u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u8 phy_addr = 0;
+ u8 port_num;
+
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ led_addr++) {
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
+ if (status)
+ goto phy_blinking_end;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
+ if (status)
+ goto phy_blinking_end;
+ break;
+ }
+ }
+
+ if (time > 0 && interval > 0) {
+ for (i = 0; i < time * 1000; i += interval) {
+ status = i40e_read_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
+ if (status)
+ goto restore_config;
+ if (led_reg & I40E_PHY_LED_MANUAL_ON)
+ led_reg = 0;
+ else
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
+ if (status)
+ goto restore_config;
+ i40e_msec_delay(interval);
+ }
+ }
+
+restore_config:
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, led_ctl);
+
+phy_blinking_end:
+ return status;
+}
+
+/**
+ * i40e_led_get_phy - return current on/off mode
+ * @hw: pointer to the hw struct
+ * @led_addr: address of led register to use
+ * @val: original value of the LED register
+ *
+ **/
+enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u16 gpio_led_port;
+ u8 phy_addr = 0;
+ u16 reg_val;
+ u16 temp_addr;
+ u8 port_num;
+ u32 i;
+
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr, &reg_val);
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_phy
+ * @hw: pointer to the HW structure
+ * @on: true to turn the LED on, false to turn it off
+ * @led_addr: address of the LED register to use
+ * @mode: original register value, plus a flag indicating whether to restore it
+ * Set LEDs on or off when controlled by the PHY
+ *
+ **/
+enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u16 led_ctl = 0;
+ u16 led_reg = 0;
+ u8 phy_addr = 0;
+ u8 port_num;
+ u32 i;
+
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, &led_reg);
+ if (status)
+ return status;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
+ if (status)
+ return status;
+ }
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
+ if (status)
+ goto restore_config;
+ if (on)
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ else
+ led_reg = 0;
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
+ if (status)
+ goto restore_config;
+ if (mode & I40E_PHY_LED_MODE_ORIG) {
+ led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
+ }
+ return status;
+restore_config:
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, led_ctl);
+ return status;
+}
+#endif /* PF_DRIVER */
+
+/**
+ * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS)
+ *reg_val = LE32_TO_CPU(cmd_resp->value);
+
+ return status;
+}
+
+/**
+ * i40e_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ bool use_register;
+ int retry = 5;
+ u32 val = 0;
+
+ use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ i40e_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ val = rd32(hw, reg_addr);
+
+ return val;
+}
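+/*
+ * Illustrative usage note: callers treat this like a plain register read,
+ * e.g. the i40e_set_filter_control() call earlier in this file:
+ *
+ *	u32 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
+ *
+ * The firmware path is used only on AQ API v1.5 or later; older firmware
+ * falls through to a direct rd32().
+ */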
+
+/**
+ * i40e_aq_rx_ctl_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value = CPU_TO_LE32(reg_val);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ bool use_register;
+ int retry = 5;
+
+ use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
+ reg_val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ i40e_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ wr32(hw, reg_addr, reg_val);
+}
+#ifdef VF_DRIVER
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_asq_cmd_details details;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM);
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ hw->dev_caps.iwarp = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ i40e_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+ i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_SUCCESS, NULL, 0, NULL);
+}
+#endif /* VF_DRIVER */
+#ifdef X722_SUPPORT
+
+/**
+ * i40e_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config: pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * i40e_aqc_arp_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (!proxy_config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+
+ desc.params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+
+ status = i40e_asq_send_command(hw, &desc, proxy_config,
+ sizeof(struct i40e_aqc_arp_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated i40e_aqc_ns_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (!ns_proxy_table_entry)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_ns_proxy_table_entry);
+
+ desc.params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+
+ status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
+ sizeof(struct i40e_aqc_ns_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: true to set filter, false to clear filter
+ * @no_wol_tco: if true, pass-through packets cannot cause a wake-up;
+ *		 if false, pass-through packets may cause a wake-up
+ * @filter_valid: true if the filter action is valid
+ * @no_wol_tco_valid: true if the no-WoL-in-TCO-traffic action is valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_wol_filter *cmd =
+ (struct i40e_aqc_set_wol_filter *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 cmd_flags = 0;
+ u16 valid_flags = 0;
+ u16 buff_len = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
+
+ if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
+ return I40E_ERR_PARAM;
+ cmd->filter_index = CPU_TO_LE16(filter_index);
+
+ if (set_filter) {
+ if (!filter)
+ return I40E_ERR_PARAM;
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER;
+ buff_len = sizeof(*filter);
+ }
+ if (no_wol_tco)
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ if (filter_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
+ if (no_wol_tco_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+ cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
+
+ status = i40e_asq_send_command(hw, &desc, filter,
+ buff_len, cmd_details);
+
+ return status;
+}
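+/*
+ * Illustrative usage sketch (parameter choices are assumptions): one
+ * plausible way to clear WoL filter slot 0 while leaving the TCO wake-up
+ * behaviour untouched is to clear with a valid action and no buffer:
+ *
+ *	status = i40e_aq_set_clear_wol_filter(hw, 0, NULL,
+ *					      false, false,
+ *					      true, false, NULL);
+ */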
+
+/**
+ * i40e_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get information for the reason of a Wake Up event
+ **/
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_wake_reason_completion *resp =
+ (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS)
+ *wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+ return status;
+}
+
+#endif /* X722_SUPPORT */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_dcb.c b/src/dpdk/drivers/net/i40e/base/i40e_dcb.c
new file mode 100644
index 00000000..26c344fd
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_dcb.c
@@ -0,0 +1,1305 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
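+/*
+ * Worked example of one table entry (illustrative): the octets
+ * 0x61 0x89 0x06 decode as priority 3 (bits 23:21), selector 1
+ * (Ethertype, bits 18:16) and protocol ID 0x8906, i.e. the classic
+ * FCoE application priority entry.
+ */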
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcenable = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, typelength, offset = 0;
+ struct i40e_cee_app_prio *app;
+ u8 i;
+
+ typelength = I40E_NTOHS(tlv->hdr.typelen);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ dcbcfg->numapps = length / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+ if (app->prio_map & BIT(up))
+ break;
+ }
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+ switch (selector) {
+ case I40E_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ break;
+ case I40E_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].protocolid = I40E_NTOHS(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 len, tlvlen, sublen, typelength;
+ struct i40e_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u32 ouisubtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ /* Return if not CEE DCBX */
+ if (subtype != I40E_CEE_DCBX_TYPE)
+ return;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+ sizeof(struct i40e_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+ typelength = I40E_NTOHS(sub_tlv->hdr.typelen);
+ sublen = (u16)((typelength &
+ I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ switch (subtype) {
+ case I40E_CEE_SUBTYPE_PG_CFG:
+ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_PFC_CFG:
+ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_APP_PRI:
+ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type, stop parsing */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+ sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Checks the OUI and dispatches IEEE 802.1Qaz and CEE DCBX TLVs to
+ * their respective parsers; all other organization specific TLVs
+ * are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case I40E_CEE_DCBX_OUI:
+ i40e_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+ u16 offset = 0;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += I40E_LLDP_MIB_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelength = I40E_NTOHS(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
+
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
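+
+/* Illustrative usage sketch (an assumption, not part of the driver): given a
+ * raw LLDP MIB buffer such as one filled in by i40e_aq_get_lldp_mib(), the
+ * parsed DCB state could be pulled out as follows:
+ *
+ *   struct i40e_dcbx_config cfg = { 0 };
+ *
+ *   if (i40e_lldp_to_dcb_config(lldpmib, &cfg) == I40E_SUCCESS)
+ *       max_tcs = cfg.etscfg.maxtcs;   (hypothetical consumer variable)
+ */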
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ /* Add APPs if Error is False */
+ if (!err) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+ struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ i = 0;
+ status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+ I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FCoE APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+ I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add iSCSI APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* iSCSI APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FIP APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FIP APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+ i++;
+ }
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+STATIC enum i40e_status_code i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+
+ /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)))
+ return i40e_get_ieee_dcb_config(hw);
+
+ /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+ sizeof(cee_v1_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE16_TO_CPU(cee_v1_cfg.tlv_status);
+ i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+ &hw->local_dcbx_config);
+ }
+ } else {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+ sizeof(cee_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE32_TO_CPU(cee_cfg.tlv_status);
+ i40e_cee_to_dcb_config(&cee_cfg,
+ &hw->local_dcbx_config);
+ }
+ }
+
+ /* CEE mode not enabled; try querying IEEE data */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ return i40e_get_ieee_dcb_config(hw);
+
+ if (ret != I40E_SUCCESS)
+ goto out;
+
+ /* Get CEE DCB Desired Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->desired_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_variables lldp_cfg;
+ u8 adminstatus = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Read LLDP NVM area */
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return ret;
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xF;
+
+ /* LLDP agent disabled */
+ if (!adminstatus) {
+ hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+ return ret;
+ }
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
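+
+/* Illustrative flow sketch (an assumption, not part of the driver): a PF
+ * bring-up path would typically call i40e_init_dcb() once and then read the
+ * firmware-resolved state back out of the hw struct:
+ *
+ *   if (i40e_init_dcb(hw) == I40E_SUCCESS &&
+ *       hw->dcbx_status == I40E_DCBX_STATUS_DONE)
+ *       apply_tc_config(&hw->local_dcbx_config);   (hypothetical helper)
+ */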
+
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 priority0, priority1, maxtcwilling = 0;
+ struct i40e_dcb_ets_config *etscfg;
+ u16 offset = 0, typelength, i;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+ buf[offset] = maxtcwilling;
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etsrec;
+ u16 offset = 0, typelength, i;
+ u8 priority0, priority1;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+ /* First Octet is reserved */
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelength;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_PFC_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength, length, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs, so just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+ length = sizeof(tlv->ouisubtype) + 1 + (i*3);
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ (length & 0x1FF));
+ tlv->typelength = I40E_HTONS(typelength);
+}
+
+/**
+ * i40e_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: pointer to org tlv
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: type of TLV to prepare
+ *
+ * add tlv information
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case I40E_IEEE_TLV_ID_ETS_CFG:
+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_ETS_REC:
+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_PFC_CFG:
+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_APP_PRI:
+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_dcbx_config *dcbcfg;
+ struct i40e_virt_mem mem;
+ u8 mib_type, *lldpmib;
+ u16 miblen;
+
+ /* update the hw local config */
+ dcbcfg = &hw->local_dcbx_config;
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+ }
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the LLDPDU buffer to be filled
+ * @miblen: set to the length of the prepared LLDPDU
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Convert the DCB configuration to LLDP MIB format for sending to FW
+ **/
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 typelength;
+
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ if (length)
+ offset += length + 2;
+ /* END TLV or beyond LLDPDU size */
+ if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
+ (offset > I40E_LLDPDU_SIZE))
+ break;
+ /* Move to next TLV */
+ if (length)
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) + length);
+ }
+ *miblen = offset;
+ return ret;
+}
+
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+
+ if (!lldp_cfg)
+ return I40E_ERR_PARAM;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
+ sizeof(struct i40e_lldp_variables),
+ (u8 *)lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_dcb.h b/src/dpdk/drivers/net/i40e/base/i40e_dcb.h
new file mode 100644
index 00000000..3b709efd
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_dcb.h
@@ -0,0 +1,223 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_OFFLOAD_DISABLED 0
+#define I40E_DCBX_OFFLOAD_ENABLED 1
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+#define I40E_CEE_DCBX_OUI 0x001b21
+#define I40E_CEE_DCBX_TYPE 2
+
+#define I40E_CEE_SUBTYPE_CTRL 1
+#define I40E_CEE_SUBTYPE_PG_CFG 2
+#define I40E_CEE_SUBTYPE_PFC_CFG 3
+#define I40E_CEE_SUBTYPE_APP_PRI 4
+
+#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_ADMINSTATUS_DISABLED 0
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1
+#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_MIB_HLEN 14
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
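+
+/* Decoding sketch (illustrative): a 16-bit LLDP TLV header packs a 7-bit
+ * type above a 9-bit length, so after byte-swapping from network order the
+ * two fields fall out of the masks above:
+ *
+ *   u16 typelength = I40E_NTOHS(tlv->typelength);
+ *   u16 type = (typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ *              I40E_LLDP_TLV_TYPE_SHIFT;
+ *   u16 len  = (typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ *              I40E_LLDP_TLV_LEN_SHIFT;
+ */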
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT 0
+#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT 4
+#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_CBS 1
+#define I40E_IEEE_TSA_ETS 2
+#define I40E_IEEE_TSA_VENDOR 255
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID 0
+#define I40E_TLV_ID_PORT_ID 1
+#define I40E_TLV_ID_TIME_TO_LIVE 2
+#define I40E_IEEE_TLV_ID_ETS_CFG 3
+#define I40E_IEEE_TLV_ID_ETS_REC 4
+#define I40E_IEEE_TLV_ID_PFC_CFG 5
+#define I40E_IEEE_TLV_ID_APP_PRI 6
+#define I40E_TLV_ID_END_OF_LLDPPDU 7
+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
+
+#define I40E_IEEE_ETS_TLV_LENGTH 25
+#define I40E_IEEE_PFC_TLV_LENGTH 6
+#define I40E_IEEE_APP_TLV_LENGTH 11
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP TLV structure */
+struct i40e_lldp_generic_tlv {
+ __be16 typelength;
+ u8 tlvinfo[1];
+};
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
+#pragma pack()
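+
+/* Note (illustrative, an assumption about intent): pack(1) above keeps these
+ * wire-format structs free of compiler padding, e.g.
+ *
+ *   sizeof(struct i40e_cee_app_prio) == 6   (2 + 1 + 2 + 1 octets)
+ *
+ * which the CEE APP parser relies on when it computes
+ * numapps = tlv_length / sizeof(struct i40e_cee_app_prio).
+ */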
+
+/*
+ * TODO: The structures below define LLDP/DCBX related variables
+ * and statistics, but we still need to determine how to obtain
+ * the required information from the Firmware in order to use them
+ */
+
+/* IEEE 802.1AB LLDP Agent Statistics */
+struct i40e_lldp_stats {
+ u64 remtablelastchangetime;
+ u64 remtableinserts;
+ u64 remtabledeletes;
+ u64 remtabledrops;
+ u64 remtableageouts;
+ u64 txframestotal;
+ u64 rxframesdiscarded;
+ u64 rxportframeerrors;
+ u64 rxportframestotal;
+ u64 rxporttlvsdiscardedtotal;
+ u64 rxporttlvsunrecognizedtotal;
+ u64 remtoomanyneighbors;
+};
+
+/* IEEE 802.1Qaz DCBX variables */
+struct i40e_dcbx_variables {
+ u32 defmaxtrafficclasses;
+ u32 defprioritytcmapping;
+ u32 deftcbandwidth;
+ u32 deftsaassignment;
+};
+
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
+
+#endif /* _I40E_DCB_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_devids.h b/src/dpdk/drivers/net/i40e/base/i40e_devids.h
new file mode 100644
index 00000000..ed73e1d2
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_devids.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#endif /* VF_DRIVER */
+#ifdef X722_SUPPORT
+#ifdef X722_A0_SUPPORT
+#define I40E_DEV_ID_X722_A0 0x374C
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
+#define I40E_DEV_ID_X722_A0_VF 0x374D
+#endif
+#endif
+#define I40E_DEV_ID_KX_X722 0x37CE
+#define I40E_DEV_ID_QSFP_X722 0x37CF
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+#endif /* VF_DRIVER */
+#endif /* X722_SUPPORT */
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
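+
+/* Usage sketch (illustrative assumption): callers test a PCI device ID
+ * against the QSFP-based 40G parts, e.g.
+ *
+ *   if (i40e_is_40G_device(hw->device_id))
+ *       configure_40g_link(hw);   (hypothetical helper)
+ */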
+
+#endif /* _I40E_DEVIDS_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_diag.c b/src/dpdk/drivers/net/i40e/base/i40e_diag.c
new file mode 100644
index 00000000..c3c76a0c
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_diag.c
@@ -0,0 +1,175 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_set_loopback
+ * @hw: pointer to the hw struct
+ * @mode: loopback mode
+ *
+ * Set chosen loopback mode
+ **/
+enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
+ enum i40e_lb_mode mode)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_aq_set_lb_modes(hw, mode, NULL))
+ ret_code = I40E_ERR_DIAG_TEST_FAILED;
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
+{
+ const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u32 pat, val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+ pat = patterns[i];
+ wr32(hw, reg, (pat & mask));
+ val = rd32(hw, reg);
+ if ((val & mask) != (pat & mask)) {
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return I40E_SUCCESS;
+}
+
+struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ /* offset mask elements stride */
+ {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ { 0 }
+};
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg, mask;
+ u32 i, j;
+
+ for (i = 0; i40e_reg_list[i].offset != 0 &&
+ ret_code == I40E_SUCCESS; i++) {
+
+ /* set actual reg range for dynamically allocated resources */
+ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ hw->func_caps.num_tx_qp != 0)
+ i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ hw->func_caps.num_msix_vectors != 0)
+ i40e_reg_list[i].elements =
+ hw->func_caps.num_msix_vectors - 1;
+
+ /* test register access */
+ mask = i40e_reg_list[i].mask;
+ for (j = 0; j < i40e_reg_list[i].elements &&
+ ret_code == I40E_SUCCESS; j++) {
+ reg = i40e_reg_list[i].offset
+ + (j * i40e_reg_list[i].stride);
+ ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ u16 reg_val;
+
+ /* read NVM control word and if NVM valid, validate EEPROM checksum */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+ if ((ret_code == I40E_SUCCESS) &&
+ ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+ BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+ return i40e_validate_nvm_checksum(hw, NULL);
+ else
+ return I40E_ERR_DIAG_TEST_FAILED;
+}
+
+/**
+ * i40e_diag_fw_alive_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform FW alive diagnostic test
+ **/
+enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return I40E_SUCCESS;
+}
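+
+/* Illustrative sketch (an assumption, not part of the driver): a minimal
+ * diagnostics pass chaining the tests above:
+ *
+ *   enum i40e_status_code ret;
+ *
+ *   ret = i40e_diag_fw_alive_test(hw);
+ *   if (ret == I40E_SUCCESS)
+ *       ret = i40e_diag_reg_test(hw);
+ *   if (ret == I40E_SUCCESS)
+ *       ret = i40e_diag_eeprom_test(hw);
+ */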
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_diag.h b/src/dpdk/drivers/net/i40e/base/i40e_diag.h
new file mode 100644
index 00000000..105b1191
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_diag.h
@@ -0,0 +1,61 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+ u32 offset; /* the base register */
+ u32 mask; /* bits that can be tested */
+ u32 elements; /* number of elements if array */
+ u32 stride; /* bytes between each element */
+};
+
+extern struct i40e_diag_reg_test_info i40e_reg_list[];
+
+enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
+ enum i40e_lb_mode mode);
+enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw);
+enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw);
+enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_hmc.c b/src/dpdk/drivers/net/i40e/base/i40e_hmc.c
new file mode 100644
index 00000000..75d38412
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_hmc.c
@@ -0,0 +1,370 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#include "i40e_type.h"
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ enum i40e_memory_type mem_type;
+ bool dma_mem_alloc_done = false;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ } else {
+ i40e_memcpy(&sd_entry->u.bp.addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (I40E_SUCCESS != ret_code)
+ if (dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
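+
+/* Illustrative call sketch (an assumption, not part of the driver): the two
+ * SD flavors imply different backing allocations; a direct-mode entry is
+ * backed by a single 2MB page:
+ *
+ *   ret = i40e_add_sd_table_entry(hw, hmc_info, sd_index,
+ *                                 I40E_SD_TYPE_DIRECT,
+ *                                 I40E_HMC_DIRECT_BP_SIZE);
+ *
+ * while I40E_SD_TYPE_PAGED instead allocates a 4KB PD page plus the
+ * 512-entry software PD table.
+ */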
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ * aligned on a 4K boundary and zeroed.
+ * 2. It should be 4K in size.
+ **/
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = false;
+ }
+
+ i40e_memcpy(&pd_entry->bp.addr, page,
+ sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = page->pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
+ I40E_NONDMA_TO_DMA);
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in pd table (for paged address mode) or in sd table
+ * (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = false;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+
+ /* free memory here */
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+}
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_hmc.h b/src/dpdk/drivers/net/i40e/base/i40e_hmc.h
new file mode 100644
index 00000000..343b251f
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_hmc.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+	struct i40e_dma_mem addr; /* populated for use by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+	struct i40e_dma_mem pd_page_addr; /* populated for use by hw */
+	struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+	/* equals the PCI function number for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(I40E_HI_DWORD(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
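+
+/*
+ * Worked example: programming a direct SD whose backing page sits at
+ * pa = 0x1_0000_0000 writes SDDATAHIGH = 0x1 (the upper pa dword) and an
+ * SDDATALOW composed of the low pa bits, PMSDBPCOUNT = 512
+ * (I40E_HMC_MAX_BP_COUNT), PMSDTYPE = 1 (direct) and the PMSDVALID bit;
+ * the SDCMD write then commits the entry for the given sd_index.
+ */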
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
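+
+/*
+ * Worked example: for an object type with base 0 and a 128-byte object
+ * size (a Tx queue context), creating 1536 objects from index 0 gives
+ * fpm_addr = 0 and fpm_limit = 1536 * 128 = 196608. With
+ * I40E_HMC_DIRECT_BP_SIZE = 2M this yields sd_idx = 0 and sd_limit = 1,
+ * i.e. a single 2M segment descriptor covers the range; the paged macro
+ * computes pd_index = 0 and pd_limit = 48 with its 4096-byte page size.
+ */
+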
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c b/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c
new file mode 100644
index 00000000..22606484
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -0,0 +1,1411 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+STATIC u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
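+
+/*
+ * Example: with I40E_HMC_L2OBJ_BASE_ALIGNMENT = 512, an offset of 1000 is
+ * rounded up to 1024, while an already-aligned offset such as 1024 is
+ * returned unchanged.
+ */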
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
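+
+/*
+ * Worked example: for 1536 Tx queues (I40E_HMC_OBJ_SIZE_TXQ = 128 bytes
+ * each), 1536 Rx queues (32 bytes each) and no FCoE objects, the Tx
+ * segment is 1536 * 128 = 196608 bytes (already 512-byte aligned), the Rx
+ * segment adds 1536 * 32 = 49152 bytes, and the FCoE segments add nothing,
+ * so the function returns 245760 bytes.
+ */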
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
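+
+/*
+ * Bring-up sketch (queue counts are illustrative placeholders): a PF
+ * driver typically initializes the HMC and then programs the backing
+ * store before creating any queue contexts:
+ *
+ *	ret_code = i40e_init_lan_hmc(hw, num_txq, num_rxq, 0, 0);
+ *	if (ret_code == I40E_SUCCESS)
+ *		ret_code = i40e_configure_lan_hmc(hw,
+ *						  I40E_HMC_MODEL_DIRECT_ONLY);
+ *
+ * i40e_shutdown_lan_hmc(), further below, undoes this at teardown.
+ */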
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) invalid
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by pd after this function
+ * returns.
+ **/
+STATIC enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the sd table (for direct address mode) invalid
+ * 2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
+ *    set to 0) and PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrements the ref count for the sd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+STATIC enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = false;
+ u32 sd_idx, sd_lmt;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+	/* This covers cases where the SD should be smaller than the full
+	 * 2M of memory. If the caller leaves direct_mode_sz unset, the SD
+	 * size defaults to 2M.
+	 */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (I40E_SUCCESS != ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i, NULL);
+ if (I40E_SUCCESS != ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1));
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = true;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+		if ((ret_code != I40E_SUCCESS) &&
+		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+		/* on success, fall through to the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
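+
+/*
+ * Worked example: if the Rx object base computed during init landed at
+ * 196608 bytes into the FPM, the value written to I40E_GLHMC_LANRXBASE
+ * above is 196608 / 512 = 384, since the 512-byte-aligned bases are
+ * programmed in 512-byte units.
+ */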
+
+/**
+ * i40e_delete_lan_hmc_object - remove LAN hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * bookkeeping information about PDs and backing storage.
+ **/
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ enum i40e_status_code ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
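+
+/*
+ * How to read these tables: each row maps one struct field into the packed
+ * HMC context. For example, {head, 13, 0} places the 13-bit queue head at
+ * context bits 0..12, {base, 57, 32} places the 57-bit base address at
+ * bits 32..88, and the "+ 128" rows start on the second 128-bit line of
+ * the context.
+ */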
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
+ { 0 }
+};
+
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
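+
+/*
+ * Worked example: for a 1-bit field at lsb = 30 (the Tx queue new_context
+ * flag above), shift_width = 30 % 8 = 6 and the target is context byte
+ * 30 / 8 = 3, so a source value of 1 becomes mask = 0x40 and
+ * src_byte = 0x40, OR-ed in while the other seven bits are preserved.
+ */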
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u16 src_word, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le16 dest_word;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
+
+ dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
+ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u32 src_dword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le32 dest_dword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 32 on an x86 machine, the shift
+	 * operation would not work because the SHL instruction's shift count
+	 * is masked to 5 bits, so the shift would do nothing
+	 */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = ~(u32)0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
+
+ dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
+ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u64 src_qword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le64 dest_qword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 64 on an x86 machine, the shift
+	 * operation would not work because the SHL instruction's shift count
+	 * is masked to 6 bits, so the shift would do nothing
+	 */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = ~(u64)0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
+
+ dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
+ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_byte - read HMC context byte into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u8 dest_byte, mask;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~(mask);
+
+ dest_byte >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_word - read HMC context word into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u16 dest_word, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le16 src_word;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_word &= ~(CPU_TO_LE16(mask));
+
+ /* get the data back into host order before shifting */
+ dest_word = LE16_TO_CPU(src_word);
+
+ dest_word >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_dword - read HMC context dword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u32 dest_dword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le32 src_dword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 32 on an x86 machine, the shift
+	 * operation would not work because the SHL instruction's shift count
+	 * is masked to 5 bits, so the shift would do nothing
+	 */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = ~(u32)0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_dword &= ~(CPU_TO_LE32(mask));
+
+ /* get the data back into host order before shifting */
+ dest_dword = LE32_TO_CPU(src_dword);
+
+ dest_dword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_qword - read HMC context qword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u64 dest_qword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le64 src_qword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 64 on an x86 machine, the shift
+	 * operation would not work because the SHL instruction's shift count
+	 * is masked to 6 bits, so the shift would do nothing
+	 */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = ~(u64)0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_qword &= ~(CPU_TO_LE64(mask));
+
+ /* get the data back into host order before shifting */
+ dest_qword = LE64_TO_CPU(src_qword);
+
+ dest_qword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_get_hmc_context - extract HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_read_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_read_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_read_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_read_qword(context_bytes, &ce_info[f], dest);
+ break;
+ default:
+ /* nothing to do, just keep going */
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
+ I40E_DMA_MEM);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the source struct layout
+ * @dest: the struct holding the values to be written into the context
+ **/
+static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+
+ /* we have to deal with each element of the HMC using the
+ * correct size so that we are correct regardless of the
+ * endianness of the machine
+ */
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_write_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_write_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_write_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_write_qword(context_bytes, &ce_info[f], dest);
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hw: pointer to the hw structure
+ * @object_base: pointer to be filled in with the object's virtual address
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function computes the object's virtual address and returns it
+ * through @object_base. It is used for LAN queue contexts.
+ **/
+STATIC
+enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
+ u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ struct i40e_hmc_info *hmc_info = &hw->hmc;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+
+ if (NULL == hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
+			  ret_code);
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
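+
+/*
+ * Worked example: for a Tx queue context of 128 bytes with a type base of
+ * 0, queue 100 sits at obj_offset_in_fpm = 100 * 128 = 12800. Under a
+ * direct SD that maps to sd_idx 0 with obj_offset_in_sd = 12800 % 2M =
+ * 12800, so the returned pointer is the SD backing page VA plus 12800.
+ */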
+
+/**
+ * i40e_get_lan_tx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_get_lan_rx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
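+
+/*
+ * Usage sketch (field values are illustrative placeholders): a driver
+ * typically clears any stale context before programming a queue, e.g.:
+ *
+ *	struct i40e_hmc_obj_rxq rx_ctx = {0};
+ *
+ *	rx_ctx.base = ring_dma_addr / 128;   (base is in 128-byte units)
+ *	rx_ctx.qlen = nb_desc;
+ *	rx_ctx.dbuff = buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ *	rx_ctx.prefena = 1;
+ *	err = i40e_clear_lan_rx_queue_context(hw, queue);
+ *	if (!err)
+ *		err = i40e_set_lan_rx_queue_context(hw, queue, &rx_ctx);
+ */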
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h b/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h
new file mode 100644
index 00000000..b2a43104
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h
@@ -0,0 +1,200 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed because fields can
+ * cross byte boundaries. If a variable were not at least as wide as its
+ * field, bits could be shifted off the top of the variable when a field
+ * starts partway through a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u16 dbuff; /* bigger than needed, see above for reason */
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u16 hbuff; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
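+
+/*
+ * Encoding example: dbuff and hbuff are stored in units given by the
+ * shifts above, so a 2048-byte data buffer is programmed as
+ * dbuff = 2048 >> I40E_RXQ_CTX_DBUFF_SHIFT = 16 and a 128-byte header
+ * buffer as hbuff = 128 >> I40E_RXQ_CTX_HBUFF_SHIFT = 2.
+ */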
+
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed because fields can
+ * cross byte boundaries. If a variable were not at least as wide as its
+ * field, bits could be shifted off the top of the variable when a field
+ * starts partway through a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u8 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num);
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info);
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_nvm.c b/src/dpdk/drivers/net/i40e/base/i40e_nvm.c
new file mode 100644
index 00000000..4fa1220b
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_nvm.c
@@ -0,0 +1,1637 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_prototype.h"
+
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command);
+
+/**
+ * i40e_init_nvm_ops - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside i40e_init_shared_code().
+ * Note that the term NVM is used here (and in all functions covered in this
+ * file) as an equivalent of the FLASH part mapped into the SR; the FLASH is
+ * always accessed through the Shadow RAM.
+ **/
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 fla, gens;
+ u8 sr_size;
+
+ DEBUGFUNC("i40e_init_nvm");
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ /* Switching to words (sr_size contains power of 2KB) */
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
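+ /* Worked example (illustrative only): if the SR_SIZE field reads 5,
+ * the Shadow RAM is 2^5 = 32KB, i.e. 32 * I40E_SR_WORDS_IN_1KB =
+ * 16384 16-bit words.
+ */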
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = false;
+ } else { /* Blank programming mode */
+ nvm->blank_nvm_mode = true;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership with the requested access type
+ * via the proper Admin Command.
+ **/
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 gtime, timeout;
+ u64 time_left = 0;
+
+ DEBUGFUNC("i40e_acquire_nvm");
+
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time_left, NULL);
+ /* Reading the Global Device Timer */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
+
+ if (ret_code)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+ access, time_left, ret_code, hw->aq.asq_last_status);
+
+ if (ret_code && time_left) {
+ /* Poll until the current NVM owner times out */
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+ while ((gtime < timeout) && time_left) {
+ i40e_msec_delay(10);
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time_left,
+ NULL);
+ if (ret_code == I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time_left) + gtime;
+ break;
+ }
+ }
+ if (ret_code != I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+ time_left, ret_code, hw->aq.asq_last_status);
+ }
+ }
+
+i40e_i40e_acquire_nvm_exit:
+ return ret_code;
+}
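+
+/* Typical ownership pattern (illustrative sketch only, mirroring the helpers
+ * in this file): callers bracket the actual NVM operation with an
+ * acquire/release pair, e.g.
+ *
+ * if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
+ *         ret_code = i40e_read_nvm_word_aq(hw, offset, &word);
+ *         i40e_release_nvm(hw);
+ * }
+ */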
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 total_delay = 0;
+
+ DEBUGFUNC("i40e_release_nvm");
+
+ if (hw->nvm.blank_nvm_mode)
+ return;
+
+ ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+ /* there are some rare cases when trying to release the resource
+ * results in an admin Q timeout, so handle them correctly
+ */
+ while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+ (total_delay < hw->aq.asq_cmd_timeout)) {
+ i40e_msec_delay(1);
+ ret_code = i40e_aq_release_resource(hw,
+ I40E_NVM_RESOURCE_ID, 0, NULL);
+ total_delay++;
+ }
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+ i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads an NVM word, acquiring the lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM, using either the GLNVM_SRCTL
+ * register or the AdminQ, depending on the device's capabilities.
+ **/
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+#ifdef X722_SUPPORT
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ }
+#else
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+#endif
+ return ret_code;
+}
+
+/**
+ * __i40e_read_nvm_word - Reads an NVM word; assumes the caller does the locking
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM, using either the GLNVM_SRCTL
+ * register or the AdminQ; any required locking is the caller's responsibility.
+ **/
+enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+#ifdef X722_SUPPORT
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ else
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+#else
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+#endif
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ DEBUGFUNC("i40e_read_nvm_word_srctl");
+
+ if (offset >= hw->nvm.sr_size) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
+ offset, hw->nvm.sr_size);
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ /* Write the address and start reading */
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code != I40E_SUCCESS)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using an AdminQ read command.
+ **/
+enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+
+ DEBUGFUNC("i40e_read_nvm_word_aq");
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+ *data = LE16_TO_CPU(*(__le16 *)data);
+
+ return ret_code;
+}
+
+/**
+ * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using either the SRCTL
+ * register or the AdminQ. The caller must hold NVM ownership before
+ * calling this function.
+ **/
+enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+#ifdef X722_SUPPORT
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
+ else
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+#else
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+#endif
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using either the SRCTL
+ * register or the AdminQ. When the AdminQ is used, the buffer read is
+ * preceded by the NVM ownership take and followed by the release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+#ifdef X722_SUPPORT
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+#else
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+#endif
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR one word at a time, using
+ * the GLNVM_SRCTL register.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 index, word;
+
+ DEBUGFUNC("i40e_read_nvm_buffer_srctl");
+
+ /* Loop through the selected region */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
+ if (ret_code != I40E_SUCCESS)
+ break;
+ }
+
+ /* Update the number of words read from the Shadow RAM */
+ *words = word;
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The caller must hold NVM ownership before calling this function.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code;
+ u16 read_size = *words;
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ DEBUGFUNC("i40e_read_nvm_buffer_aq");
+
+ do {
+ /* Calculate the number of words we should read in this step.
+ * The FVL AQ does not allow reading more than one page (one
+ * 4KB sector) at a time or crossing page boundaries.
+ */
+ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+ read_size = min(*words,
+ (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+ (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+ else
+ read_size = min((*words - words_read),
+ I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+ data + words_read, last_cmd);
+ if (ret_code != I40E_SUCCESS)
+ goto read_nvm_buffer_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+ *words = words_read;
+ return ret_code;
+}
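+
+/* Worked example (illustrative only): with I40E_SR_SECTOR_SIZE_IN_WORDS ==
+ * 2048, reading 100 words starting at offset 2000 is split into two AQ
+ * commands: the first reads min(100, 2048 - 2000) = 48 words up to the
+ * sector boundary, the second reads the remaining 52 words from offset 2048.
+ */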
+
+/**
+ * i40e_read_nvm_aq - Reads Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer to store the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_read_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can read only up to 4KB (one sector) in one AQ read */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
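+
+/* Worked example (illustrative only): a request with offset = 2040 and
+ * words = 16 is rejected by the sector check above, because
+ * (2040 + 15) / 2048 == 1 while 2040 / 2048 == 0, i.e. the access would
+ * spread over two sectors and must be split by the caller.
+ */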
+
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ **/
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_write_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
+ else
+ ret_code = i40e_aq_update_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
+/**
+ * __i40e_write_nvm_word - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
+ * NVM ownership has to be acquired and released (on ARQ completion event
+ * reception) by the caller. To commit the SR to NVM, the update checksum
+ * function should be called.
+ **/
+enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data)
+{
+ DEBUGFUNC("i40e_write_nvm_word");
+
+ *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
+
+ /* Value 0x00 below means that we treat SR as a flat mem */
+ return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
+}
+
+/**
+ * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by the caller. To commit the SR to NVM,
+ * the update checksum function should be called.
+ **/
+enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data)
+{
+ __le16 *le_word_ptr = (__le16 *)data;
+ u16 *word_ptr = (u16 *)data;
+ u32 i = 0;
+
+ DEBUGFUNC("i40e_write_nvm_buffer");
+
+ for (i = 0; i < words; i++)
+ le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ return i40e_write_nvm_aq(hw, module_pointer, offset, words,
+ data, false);
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates the SW checksum that covers the whole 64kB shadow
+ * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
+ * of the VPD area are customer specific and unknown, so this function skips
+ * the maximum possible size of the VPD area (1kB).
+ **/
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_virt_mem vmem;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ u16 *data;
+ u16 i = 0;
+
+ DEBUGFUNC("i40e_calc_nvm_checksum");
+
+ ret_code = i40e_allocate_virt_mem(hw, &vmem,
+ I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+ if (ret_code)
+ goto i40e_calc_nvm_checksum_exit;
+ data = (u16 *)vmem.va;
+
+ /* read pointer to VPD area */
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
+ &vpd_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = __i40e_read_nvm_word(hw,
+ I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Read SR page */
+ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+ ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ }
+
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if ((i >= (u32)vpd_module) &&
+ (i < ((u32)vpd_module +
+ (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+ continue;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if ((i >= (u32)pcie_alt_module) &&
+ (i < ((u32)pcie_alt_module +
+ (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+ continue;
+ }
+
+ checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ i40e_free_virt_mem(hw, &vmem);
+ return ret_code;
+}
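+
+/* Checksum arithmetic (illustrative only): the stored word is derived from
+ * the sum of the covered words, e.g. if those words sum to 0x1234 the
+ * checksum word becomes I40E_SR_SW_CHECKSUM_BASE - 0x1234 (mod 2^16);
+ * validation later recomputes the same value and compares it against the
+ * word stored at I40E_SR_SW_CHECKSUM_WORD.
+ */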
+
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by caller.
+ * This function will commit SR to NVM.
+ **/
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum;
+ __le16 le_sum;
+
+ DEBUGFUNC("i40e_update_nvm_checksum");
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ le_sum = CPU_TO_LE16(checksum);
+ if (ret_code == I40E_SUCCESS)
+ ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+ 1, &le_sum, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs the checksum calculation and validates the NVM SW checksum. If
+ * the caller does not need the checksum value, the pointer can be NULL.
+ **/
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum_sr = 0;
+ u16 checksum_local = 0;
+
+ DEBUGFUNC("i40e_validate_nvm_checksum");
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ i40e_release_nvm(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto i40e_validate_nvm_checksum_exit;
+ } else {
+ goto i40e_validate_nvm_checksum_exit;
+ }
+
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+i40e_validate_nvm_checksum_exit:
+ return ret_code;
+}
+
+STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
+{
+ return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+}
+STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
+{
+ return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+}
+
+STATIC const char *i40e_nvm_update_state_str[] = {
+ "I40E_NVMUPD_INVALID",
+ "I40E_NVMUPD_READ_CON",
+ "I40E_NVMUPD_READ_SNT",
+ "I40E_NVMUPD_READ_LCB",
+ "I40E_NVMUPD_READ_SA",
+ "I40E_NVMUPD_WRITE_ERA",
+ "I40E_NVMUPD_WRITE_CON",
+ "I40E_NVMUPD_WRITE_SNT",
+ "I40E_NVMUPD_WRITE_LCB",
+ "I40E_NVMUPD_WRITE_SA",
+ "I40E_NVMUPD_CSUM_CON",
+ "I40E_NVMUPD_CSUM_SA",
+ "I40E_NVMUPD_CSUM_LCB",
+ "I40E_NVMUPD_STATUS",
+ "I40E_NVMUPD_EXEC_AQ",
+ "I40E_NVMUPD_GET_AQ_RESULT",
+};
+
+/**
+ * i40e_nvmupd_command - Process an NVM update command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * Dispatches command depending on what update state is current
+ **/
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_command");
+
+ /* assume success */
+ *perrno = 0;
+
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
+ cmd->command, cmd->config, cmd->offset, cmd->data_size);
+
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
+ }
+
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
+ bytes[0] = hw->nvmupd_state;
+
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
+ return I40E_SUCCESS;
+ }
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT:
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_READING:
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_WRITING:
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ return I40E_SUCCESS;
+ }
+
+ status = I40E_ERR_NOT_READY;
+ *perrno = -EBUSY;
+ break;
+
+ default:
+ /* invalid state, should never happen */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+ return status;
+}
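+
+/* State machine overview (illustrative only): a single-shot transaction such
+ * as WRITE_SA runs from INIT and parks in INIT_WAIT until the ARQ completion
+ * event arrives; a multi-step transaction moves
+ *
+ * INIT -> (WRITE_SNT) -> WRITE_WAIT -> (event) -> WRITING
+ *      -> (WRITE_CON, back to WRITE_WAIT as needed) -> (WRITE_LCB)
+ *      -> INIT_WAIT -> (event) -> INIT
+ *
+ * with i40e_nvmupd_check_wait_event() performing the event-driven hops.
+ */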
+
+/**
+ * i40e_nvmupd_state_init - Handle NVM update state Init
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * Process legitimate commands of the Init state and conditionally set next
+ * state. Reject all other commands.
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_state_init");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_READ_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ i40e_release_nvm(hw);
+ }
+ break;
+
+ case I40E_NVMUPD_READ_SNT:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_ERA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_SNT:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_EXEC_AQ:
+ status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_GET_AQ_RESULT:
+ status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in init state\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_ERR_NVM;
+ *perrno = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_reading - Handle NVM update state Reading
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * NVM ownership is already held. Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_state_reading");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_READ_SA:
+ case I40E_NVMUPD_READ_CON:
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_READ_LCB:
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ i40e_release_nvm(hw);
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in reading state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_writing - Handle NVM update state Writing
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * NVM ownership is already held. Process legitimate commands and set any
+ * change in state; reject all other commands
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
+
+ DEBUGFUNC("i40e_nvmupd_state_writing");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+retry:
+ switch (upd_cmd) {
+ case I40E_NVMUPD_WRITE_CON:
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_LCB:
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_CON:
+ /* Assumes the caller has acquired the nvm */
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_LCB:
+ /* Assumes the caller has acquired the nvm */
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in writing state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ enum i40e_status_code old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * i40e_nvmupd_validate_command - Validate given command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
+ **/
+STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ enum i40e_nvmupd_cmd upd_cmd;
+ u8 module, transaction;
+
+ DEBUGFUNC("i40e_nvmupd_validate_command\n");
+
+ /* anything that doesn't match a recognized case is an error */
+ upd_cmd = I40E_NVMUPD_INVALID;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+
+ /* limits on data size */
+ if ((cmd->data_size < 1) ||
+ (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command data_size %d\n",
+ cmd->data_size);
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
+ }
+
+ switch (cmd->command) {
+ case I40E_NVM_READ:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_READ_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_READ_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_READ_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_READ_SA;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ }
+ break;
+
+ case I40E_NVM_WRITE:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_WRITE_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_WRITE_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_WRITE_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_WRITE_SA;
+ break;
+ case I40E_NVM_ERA:
+ upd_cmd = I40E_NVMUPD_WRITE_ERA;
+ break;
+ case I40E_NVM_CSUM:
+ upd_cmd = I40E_NVMUPD_CSUM_CON;
+ break;
+ case (I40E_NVM_CSUM|I40E_NVM_SA):
+ upd_cmd = I40E_NVMUPD_CSUM_SA;
+ break;
+ case (I40E_NVM_CSUM|I40E_NVM_LCB):
+ upd_cmd = I40E_NVMUPD_CSUM_LCB;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
+ }
+ break;
+ }
+
+ return upd_cmd;
+}
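+
+/* Config word layout (illustrative only): the transaction type and module
+ * pointer are packed into cmd->config, so a write-and-update-checksum
+ * request could be encoded as
+ *
+ * cmd.config = (I40E_NVM_CSUM << I40E_NVM_TRANS_SHIFT) | module;
+ *
+ * which i40e_nvmupd_get_transaction()/i40e_nvmupd_get_module() decode back,
+ * yielding I40E_NVMUPD_CSUM_CON for an I40E_NVM_WRITE command.
+ */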
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ enum i40e_status_code status;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
+ }
+
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return I40E_SUCCESS;
+}
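+
+/* Worked example (illustrative only): with a 32-byte AQ descriptor, a
+ * request of offset = 16 and data_size = 40 first copies descriptor bytes
+ * 16..31 (16 bytes) and then the remaining 24 bytes from the start of the
+ * data buffer in hw->nvm_buff.
+ */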
+
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ enum i40e_status_code status;
+ u8 module, transaction;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ bytes, last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last,
+ &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_osdep.h b/src/dpdk/drivers/net/i40e/base/i40e_osdep.h
new file mode 100644
index 00000000..38e7ba5b
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_osdep.h
@@ -0,0 +1,240 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "../i40e_logs.h"
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef int bool;
+
+typedef enum i40e_status_code i40e_status;
+#define __iomem
+#define hw_dbg(hw, S, A...) do {} while (0)
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((u32)(n))
+#define low_16_bits(x) ((x) & 0xFFFF)
+#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16)
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+#define false 0
+#define true 1
+
+#define min(a,b) RTE_MIN(a,b)
+#define max(a,b) RTE_MAX(a,b)
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
+
+#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)
+#define DEBUGOUT1(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT6 DEBUGOUT3
+#define DEBUGOUT7 DEBUGOUT6
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ PMD_DRV_LOG_RAW(DEBUG, "i40e %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* The AQ command based interfaces i40e_read_rx_ctl() and i40e_write_rx_ctl()
+ * are required for reading/writing the registers below, as accessing them
+ * directly may not function correctly while the device is under heavy
+ * small-packet traffic. Note that these interfaces are available from FVL5
+ * onwards and are not usable before the AdminQ is ready during
+ * initialization.
+ *
+ * I40E_PFQF_CTL_0
+ * I40E_PFQF_HENA
+ * I40E_PFQF_FDALLOC
+ * I40E_PFQF_HREGION
+ * I40E_PFLAN_QALLOC
+ * I40E_VPQF_CTL
+ * I40E_VFQF_HENA
+ * I40E_VFQF_HREGION
+ * I40E_VSIQF_CTL
+ * I40E_VSILAN_QBASE
+ * I40E_VSILAN_QTABLE
+ * I40E_VSIQF_TCREGION
+ * I40E_PFQF_HKEY
+ * I40E_VFQF_HKEY
+ * I40E_PRTQF_CTL_0
+ * I40E_GLFCOE_RCTL
+ * I40E_GLFCOE_RSOF
+ * I40E_GLQF_CTL
+ * I40E_GLQF_SWAP
+ * I40E_GLQF_HASH_MSK
+ * I40E_GLQF_HASH_INSET
+ * I40E_GLQF_HSYM
+ * I40E_GLQF_FC_MSK
+ * I40E_GLQF_FC_INSET
+ * I40E_GLQF_FD_MSK
+ * I40E_PRTQF_FD_INSET
+ * I40E_PRTQF_FD_FLXINSET
+ * I40E_PRTQF_FD_MSK
+ */
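+
+/* Illustrative usage sketch only: a read-modify-write of such a register
+ * goes through the AQ based helpers rather than rd32()/wr32(), e.g.
+ * (extra_bits is a placeholder for whatever bits the caller needs)
+ *
+ * u32 hena = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ * i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), hena | extra_bits);
+ */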
+
+#define I40E_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define I40E_PCI_REG_ADDR(a, reg) \
+ ((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
+static inline uint32_t i40e_read_addr(volatile void *addr)
+{
+ return rte_le_to_cpu_32(I40E_PCI_REG(addr));
+}
+#define I40E_PCI_REG_WRITE(reg, value) \
+ do { I40E_PCI_REG((reg)) = rte_cpu_to_le_32(value); } while (0)
+
+#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT)
+#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT)
+
+#define I40E_READ_REG(hw, reg) i40e_read_addr(I40E_PCI_REG_ADDR((hw), (reg)))
+#define I40E_WRITE_REG(hw, reg, value) \
+ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define rd32(a, reg) i40e_read_addr(I40E_PCI_REG_ADDR((a), (reg)))
+#define wr32(a, reg, value) \
+ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value))
+#define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT)))
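+
+/* Editor's note: a small ordering sketch, not upstream code. wr32() is a
+ * posted MMIO write; where ordering against the device matters, it is
+ * followed by a read-back, which is what I40E_WRITE_FLUSH() expands to
+ * ("next_to_use" is a hypothetical local):
+ *
+ *	wr32(hw, I40E_PF_ATQT, next_to_use);  // bump the AdminQ send tail
+ *	I40E_WRITE_FLUSH(hw);                 // read-back pushes the write out
+ */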
+
+#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ u64 pa;
+ u32 size;
+ const void *zone;
+} __attribute__((packed));
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
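+
+/* Editor's note: an allocate/use/free sketch, not upstream code; the _d
+ * implementations live in the PMD, and the third wrapper argument (an
+ * enum i40e_memory_type value) is dropped on DPDK. "ring_size" is a
+ * hypothetical local:
+ *
+ *	struct i40e_dma_mem mem;
+ *	if (i40e_allocate_dma_mem(hw, &mem, i40e_mem_arq_ring,
+ *				  ring_size, 4096) == I40E_SUCCESS) {
+ *		// DMA via mem.va, program mem.pa into the device
+ *		i40e_free_dma_mem(hw, &mem);
+ *	}
+ */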
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __attribute__((packed));
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define cpu_to_le16(o) rte_cpu_to_le_16(o)
+#define cpu_to_le32(s) rte_cpu_to_le_32(s)
+#define cpu_to_le64(h) rte_cpu_to_le_64(h)
+#define le16_to_cpu(a) rte_le_to_cpu_16(a)
+#define le32_to_cpu(c) rte_le_to_cpu_32(c)
+#define le64_to_cpu(k) rte_le_to_cpu_64(k)
+
+/* SW spinlock */
+struct i40e_spinlock {
+ rte_spinlock_t spinlock;
+};
+
+#define i40e_init_spinlock(_sp) i40e_init_spinlock_d(_sp)
+#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp)
+#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp)
+#define i40e_destroy_spinlock(_sp) i40e_destroy_spinlock_d(_sp)
+
+#define I40E_NTOHS(a) rte_be_to_cpu_16(a)
+#define I40E_NTOHL(a) rte_be_to_cpu_32(a)
+#define I40E_HTONS(a) rte_cpu_to_be_16(a)
+#define I40E_HTONL(a) rte_cpu_to_be_32(a)
+
+#define i40e_memset(a, b, c, d) memset((a), (b), (c))
+#define i40e_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DELAY(x) rte_delay_us(x)
+#define i40e_usec_delay(x) rte_delay_us(x)
+#define i40e_msec_delay(x) rte_delay_us(1000*(x))
+#define udelay(x) DELAY(x)
+#define msleep(x) DELAY(1000*(x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#endif /* _I40E_OSDEP_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_prototype.h b/src/dpdk/drivers/net/i40e/base/i40e_prototype.h
new file mode 100644
index 00000000..03dda937
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_prototype.h
@@ -0,0 +1,545 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW
+ * setup.
+ */
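+
+/* Editor's note: a bring-up sketch, not upstream code, of how the adminq
+ * prototypes below are typically sequenced during early setup:
+ *
+ *	if (i40e_init_shared_code(hw) == I40E_SUCCESS &&  // MAC type setup
+ *	    i40e_init_adminq(hw) == I40E_SUCCESS) {       // ASQ/ARQ + FW handshake
+ *		// ... configure the device via AQ commands ...
+ *		i40e_shutdown_adminq(hw);                 // teardown in reverse
+ *	}
+ */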
+
+/* adminq functions */
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
+u16 i40e_clean_asq(struct i40e_hw *hw);
+void i40e_free_adminq_asq(struct i40e_hw *hw);
+void i40e_free_adminq_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+#ifdef VF_DRIVER
+bool i40e_asq_done(struct i40e_hw *hw);
+#endif
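+
+/* Editor's note: a direct-command sketch, not upstream code, for
+ * i40e_asq_send_command() above; it assumes the shared-code helper
+ * i40e_fill_default_direct_cmd_desc() is visible, and passes a NULL
+ * buffer since the command carries no indirect data:
+ *
+ *	struct i40e_aq_desc desc;
+ *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */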
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+#ifdef X722_SUPPORT
+
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+#endif
+#ifndef I40E_NDIS_SUPPORT
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
+#endif /* I40E_NDIS_SUPPORT */
+
+#ifdef PF_DRIVER
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+
+/* admin send queue commands */
+
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_reset);
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size, bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free);
+enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free);
+
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags, u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, u32 field_id, void *data,
+ u16 buf_size, u16 *element_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, void *data, u16 buf_size,
+ u16 element_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ bool start_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid);
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
+ u8 tcmap, bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1);
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1);
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed);
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode);
+
+/* i40e_common */
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
+/* prototype for functions used for NVM access */
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data,
+ bool last_command);
+enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data);
+enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data);
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+#endif /* PF_DRIVER */
+
+#if defined(I40E_QV) || defined(VF_DRIVER)
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
+
+#endif
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+STATIC INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
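+
+/* Editor's note: a decode sketch, not upstream code; the field names come
+ * from struct i40e_rx_ptype_decoded in i40e_type.h:
+ *
+ *	struct i40e_rx_ptype_decoded d = decode_rx_desc_ptype(ptype);
+ *
+ *	if (d.known && d.outer_ip == I40E_RX_PTYPE_OUTER_IP)
+ *		; // e.g. derive mbuf packet-type/checksum flags here
+ */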
+
+/* prototype for functions used for SW spinlocks */
+void i40e_init_spinlock(struct i40e_spinlock *sp);
+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
+void i40e_release_spinlock(struct i40e_spinlock *sp);
+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
+enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+#ifdef X722_SUPPORT
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
+ u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
+ u16 reg, u8 phy_addr, u16 value);
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_register.h b/src/dpdk/drivers/net/i40e/base/i40e_register.h
new file mode 100644
index 00000000..fd0a7230
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_register.h
@@ -0,0 +1,5370 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+
+#ifdef PF_DRIVER
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
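+
+/* Editor's note: a field-extraction sketch, not upstream code, showing
+ * how the _SHIFT/_MASK pairs throughout this file are meant to be used:
+ *
+ *	u32 reg  = rd32(hw, I40E_PF_ARQH);
+ *	u16 head = (reg & I40E_PF_ARQH_ARQH_MASK) >> I40E_PF_ARQH_ARQH_SHIFT;
+ */
+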
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
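For orientation, here is a minimal sketch of how the MSCA/MSRWD pair above drives one MDIO transaction: a command word is composed from the SHIFT/MASK fields, MDICMD starts the transfer, and the result lands in MSRWD. It is illustrative only and not part of the patch; it assumes I40E_MASK(mask, shift) expands to (mask << shift) as in i40e_type.h, that struct i40e_hw and the rd32()/wr32() helpers are hypothetical stand-ins for the driver's real register accessors, and that 0x3 is the clause-45 read opcode (an assumption, not stated in this header).

#include <stdint.h>

struct i40e_hw;                                            /* opaque driver handle (stub) */
uint32_t rd32(struct i40e_hw *hw, uint32_t reg);           /* assumed accessor */
void wr32(struct i40e_hw *hw, uint32_t reg, uint32_t val); /* assumed accessor */

static int mdio_read_sketch(struct i40e_hw *hw, uint8_t port, uint8_t phy,
                            uint8_t dev, uint16_t reg, uint16_t *val)
{
        uint32_t cmd, i;

        /* STCODE is left at 0; MDICMD starts the transaction and
         * self-clears when it completes. */
        cmd = ((uint32_t)reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
              ((uint32_t)dev << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
              ((uint32_t)phy << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
              (0x3u << I40E_GLGEN_MSCA_OPCODE_SHIFT) |      /* read opcode: assumption */
              I40E_GLGEN_MSCA_MDICMD_MASK;
        wr32(hw, I40E_GLGEN_MSCA(port), cmd);

        for (i = 0; i < 1000; i++) {
                if (rd32(hw, I40E_GLGEN_MSCA(port)) &
                    I40E_GLGEN_MSCA_MDICMD_MASK)
                        continue;
                /* Read data is returned in the upper half of MSRWD. */
                *val = (uint16_t)((rd32(hw, I40E_GLGEN_MSRWD(port)) &
                                   I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
                                  I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT);
                return 0;
        }
        return -1; /* timed out; a real driver would also delay between polls */
}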
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
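Likewise, the GLGEN_RSTAT and GLGEN_RTRIG fields above decompose directly with these masks: RSTAT reports reset state and per-type reset counters, while RTRIG requests a reset. A sketch under the same stub assumptions as the MDIO example earlier (hypothetical rd32()/wr32(), I40E_MASK as (mask << shift)):

static uint32_t glgen_reset_type_sketch(struct i40e_hw *hw)
{
        uint32_t rstat = rd32(hw, I40E_GLGEN_RSTAT);

        /* RESET_TYPE is the two-bit field at bits 3:2 of GLGEN_RSTAT. */
        return (rstat & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >>
               I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
}

static void glgen_request_corer_sketch(struct i40e_hw *hw)
{
        /* Setting CORER requests a core reset; GLGEN_RSTAT's CORERCNT
         * counter advances once the reset actually occurs. */
        wr32(hw, I40E_GLGEN_RTRIG,
             rd32(hw, I40E_GLGEN_RTRIG) | I40E_GLGEN_RTRIG_CORER_MASK);
}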
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205C /* Reset: CORER */
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200C /* Reset: CORER */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0C00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
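The three PFHMC_SD* registers above form one programming interface: a segment descriptor is staged in SDDATAHIGH/SDDATALOW and latched by a write to SDCMD. A sketch of that sequence, using the same hypothetical rd32()/wr32() stubs as above; the encoding of the PMSDTYPE bit (paged vs. direct backing) is hardware-defined and is passed through untouched here rather than asserted:

static void hmc_sd_write_sketch(struct i40e_hw *hw, uint32_t sd_index,
                                uint64_t pa, uint32_t bp_count,
                                uint32_t sd_type)
{
        uint32_t low;

        /* Upper half of the 64-bit descriptor physical address. */
        wr32(hw, I40E_PFHMC_SDDATAHIGH, (uint32_t)(pa >> 32));

        /* The low word packs the valid bit, type bit, backing-page
         * count, and PA[31:12] in the PMSDDATALOW field. */
        low = I40E_PFHMC_SDDATALOW_PMSDVALID_MASK |
              ((sd_type << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) &
               I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK) |
              ((bp_count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) &
               I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK) |
              ((((uint32_t)(pa >> 12)) << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) &
               I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK);
        wr32(hw, I40E_PFHMC_SDDATALOW, low);

        /* PMSDWR together with the index commits the staged entry. */
        wr32(hw, I40E_PFHMC_SDCMD,
             I40E_PFHMC_SDCMD_PMSDWR_MASK |
             ((sd_index << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) &
              I40E_PFHMC_SDCMD_PMSDIDX_MASK));
}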
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
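PFINT_DYN_CTLN is the per-vector doorbell a driver writes at the end of interrupt handling, and its fields above compose directly. A sketch with the same stubs; how an MSI-X vector number maps to a DYN_CTLN array entry (vector 0 uses PFINT_DYN_CTL0 instead) is left outside this snippet:

static void pfint_rearm_sketch(struct i40e_hw *hw, uint32_t dyn_ctln_idx,
                               uint32_t itr_indx)
{
        /* Re-enable the vector, acknowledge its pending-bit-array bit,
         * and select which ITR timer the two-bit index refers to. */
        uint32_t val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
                       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                       ((itr_indx << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) &
                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);

        wr32(hw, I40E_PFINT_DYN_CTLN(dyn_ctln_idx), val);
}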
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
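[Editor's note: the QRX_ENA/QTX_ENA registers above pair a software request bit (QENA_REQ) with a hardware status bit (QENA_STAT): software sets the request and then polls until the status bit follows. A minimal sketch of that handshake for one LAN Tx queue, assuming the I40E_QTX_ENA macros above are in scope; reg_read()/reg_write() are hypothetical stand-ins for the driver's MMIO accessors (rd32()/wr32() in the base code):

#include <stdbool.h>
#include <stdint.h>

uint32_t reg_read(uint32_t off);              /* hypothetical MMIO read */
void     reg_write(uint32_t off, uint32_t v); /* hypothetical MMIO write */

static bool qtx_enable(uint16_t q)
{
	uint32_t reg = reg_read(I40E_QTX_ENA(q));
	int retry;

	reg |= I40E_QTX_ENA_QENA_REQ_MASK;       /* request: enable queue */
	reg_write(I40E_QTX_ENA(q), reg);

	for (retry = 0; retry < 200; retry++) {  /* wait for hw to ack */
		if (reg_read(I40E_QTX_ENA(q)) & I40E_QTX_ENA_QENA_STAT_MASK)
			return true;
		/* the real driver sleeps a few microseconds between polls */
	}
	return false;
}

Disabling is the mirror image: clear QENA_REQ and poll for QENA_STAT to drop.]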
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
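[Editor's note: every field in this file follows the same SHIFT/MASK convention — I40E_MASK(mask, shift) widens a field mask into register position (the driver defines it as roughly ((u32)(mask) << (shift))), so reading or updating a field is plain mask-and-shift arithmetic. A self-contained illustration using the PRTGL_SAH max-frame-size (MFS) field above, with the two macros re-declared locally so the snippet compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define I40E_MASK(mask, shift)   ((uint32_t)(mask) << (shift))
#define I40E_PRTGL_SAH_MFS_SHIFT 16
#define I40E_PRTGL_SAH_MFS_MASK  I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)

/* Read a field out of a raw register value. */
static uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace a field inside a raw register value, leaving other bits alone. */
static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t sah = 0;

	sah = field_set(sah, I40E_PRTGL_SAH_MFS_MASK,
			I40E_PRTGL_SAH_MFS_SHIFT, 9728);  /* jumbo frame size */
	printf("MFS = %u\n",
	       field_get(sah, I40E_PRTGL_SAH_MFS_MASK,
			 I40E_PRTGL_SAH_MFS_SHIFT));      /* prints: MFS = 9728 */
	return 0;
}
]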
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#endif /* PF_DRIVER */
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#ifdef PF_DRIVER
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
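[Editor's note: GLNVM_SRCTL/GLNVM_SRDATA above form a small command/response pair for NVM shadow-RAM access: wait for DONE, write the word address plus START into SRCTL, wait for DONE again, then pull the word out of SRDATA.RDDATA. A hedged sketch of that read flow, assuming the GLNVM macros above are in scope (the base driver's i40e_nvm.c implements the real version; reg_read()/reg_write() are the same hypothetical MMIO stand-ins as in the queue-enable sketch):

#include <stdbool.h>
#include <stdint.h>

uint32_t reg_read(uint32_t off);              /* hypothetical MMIO read */
void     reg_write(uint32_t off, uint32_t v); /* hypothetical MMIO write */

static bool srctl_wait_done(void)
{
	int i;

	for (i = 0; i < 100000; i++)             /* bound the busy-wait */
		if (reg_read(I40E_GLNVM_SRCTL) & I40E_GLNVM_SRCTL_DONE_MASK)
			return true;
	return false;
}

static bool nvm_read_word(uint16_t offset, uint16_t *data)
{
	uint32_t v;

	if (!srctl_wait_done())                  /* previous op finished? */
		return false;
	v = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
	    (1u << I40E_GLNVM_SRCTL_START_SHIFT);
	reg_write(I40E_GLNVM_SRCTL, v);          /* latch address + START */
	if (!srctl_wait_done())
		return false;
	v = reg_read(I40E_GLNVM_SRDATA);
	*data = (uint16_t)((v & I40E_GLNVM_SRDATA_RDDATA_MASK) >>
			   I40E_GLNVM_SRDATA_RDDATA_SHIFT);
	return true;
}
]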
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GL_PRS_FVBM_MAX_INDEX 3
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
+#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
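+/*
+ * Usage sketch (illustrative, assumes the rd32() accessor from
+ * i40e_osdep.h): extracting the guaranteed and best-effort flow director
+ * filter counts from the I40E_PFQF_FDSTAT fields above:
+ *
+ *	u32 fdstat = rd32(hw, I40E_PFQF_FDSTAT);
+ *	u32 guarant = (fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ *		      I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT;
+ *	u32 best = (fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ *		   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT;
+ */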
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
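+/*
+ * Usage sketch: programming the PF RSS hash key and lookup table through
+ * the indexed I40E_PFQF_HKEY/I40E_PFQF_HLUT registers above, assuming the
+ * wr32() accessor from i40e_osdep.h and hypothetical caller-supplied
+ * u32 key[13] and u32 lut[128] arrays:
+ *
+ *	int i;
+ *	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ *		wr32(hw, I40E_PFQF_HKEY(i), key[i]);
+ *	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ *		wr32(hw, I40E_PFQF_HLUT(i), lut[i]);
+ *
+ * Each HLUT dword packs four 6-bit queue indexes (LUT0..LUT3).
+ */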
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
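+/*
+ * Usage sketch: composing one flexible payload extraction entry from the
+ * I40E_PRTQF_FLX_PIT fields above. src_off, fsize, dst_off and idx are
+ * hypothetical caller-supplied values; wr32() is from i40e_osdep.h:
+ *
+ *	u32 flx = ((src_off << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) &
+ *		   I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) |
+ *		  ((fsize << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) &
+ *		   I40E_PRTQF_FLX_PIT_FSIZE_MASK) |
+ *		  ((dst_off << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) &
+ *		   I40E_PRTQF_FLX_PIT_DEST_OFF_MASK);
+ *	wr32(hw, I40E_PRTQF_FLX_PIT(idx), flx);
+ */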
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
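+/*
+ * Usage sketch: the GLPRT octet counters are 48-bit values split across an
+ * L (lower 32 bits) and H (upper 16 bits) register pair. Reading the
+ * good-octets-received counter for a hypothetical port index 0..3, with
+ * the rd32() accessor from i40e_osdep.h:
+ *
+ *	u64 gorc = (u64)rd32(hw, I40E_GLPRT_GORCL(port)) |
+ *		   ((u64)(rd32(hw, I40E_GLPRT_GORCH(port)) &
+ *			  I40E_GLPRT_GORCH_GORCH_MASK) << 32);
+ */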
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
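+/*
+ * Usage sketch: the priority flow control counters above take two indexes,
+ * _i selecting the port (0...3) and _j the traffic class (0...7); the
+ * address arithmetic ((_i) * 8 + (_j) * 32) is encoded in the macros, so a
+ * read is simply (rd32() from i40e_osdep.h, port/tc hypothetical):
+ *
+ *	u32 pxoff_rx = rd32(hw, I40E_GLPRT_PXOFFRXC(port, tc));
+ */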
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
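The 1588 timestamp registers above come in low/high pairs; a full timestamp is the concatenation of the two 32-bit halves. A minimal sketch for reading the latched Tx timestamp, again assuming rd32() from the osdep layer (the helper name is illustrative):

static inline u64 i40e_read_tx_tstamp(struct i40e_hw *hw)
{
	u32 lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
	u32 hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);

	/* combine the two 32-bit halves into one 64-bit timestamp */
	return ((u64)hi << 32) | lo;
}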
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
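Every _SHIFT/_MASK pair in this file is consumed the same way: AND with the mask, then shift right to recover the field value. A sketch decoding the Tx malicious-driver-detection register above (the function is illustrative only):

static inline void i40e_decode_mdet_tx(u32 reg)
{
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
			    I40E_GL_MDET_TX_QUEUE_SHIFT;
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
			    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u8 event  = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
			    I40E_GL_MDET_TX_EVENT_SHIFT;
		/* report: PF 'pf_num' queue 'queue' triggered event 'event' */
		(void)queue; (void)pf_num; (void)event;
	}
}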
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
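The wake-up filter enables above are single-bit fields, so a configuration value is just the OR of the desired masks. A hedged sketch, assuming the wr32() helper from the osdep layer (the function name is illustrative):

static inline void i40e_enable_basic_wake(struct i40e_hw *hw)
{
	/* wake on link change and on magic packet */
	wr32(hw, I40E_PFPM_WUFC,
	     I40E_PFPM_WUFC_LNKC_MASK | I40E_PFPM_WUFC_MAG_MASK);
}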
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#endif /* PF_DRIVER */
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
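The VF admin-queue registers above describe a descriptor ring: the base address is split across BAL/BAH, and the LEN register holds the ring length in its low bits with bit 31 enabling the queue. A sketch of programming the Tx (ATQ) side, assuming wr32() from the osdep layer and an already-allocated DMA-able ring (names are illustrative, not driver code):

static inline void i40e_vf_config_atq(struct i40e_hw *hw,
				      u64 ring_dma, u16 ring_len)
{
	wr32(hw, I40E_VF_ATQH1, 0);		/* reset head and tail */
	wr32(hw, I40E_VF_ATQT1, 0);
	wr32(hw, I40E_VF_ATQBAL1, (u32)(ring_dma & 0xFFFFFFFF));
	wr32(hw, I40E_VF_ATQBAH1, (u32)(ring_dma >> 32));
	wr32(hw, I40E_VF_ATQLEN1,
	     (ring_len & I40E_VF_ATQLEN1_ATQLEN_MASK) |
	     I40E_VF_ATQLEN1_ATQENABLE_MASK);
}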
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _INTVF=0...15 */ /* Reset: VFR */

+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
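Re-enabling a VF MSI-X vector writes INTENA together with CLEARPBA (to clear the pending-bit array) into the vector's DYN_CTLN register. A minimal sketch; ITR index 3 is used here as the "no ITR update" index, which should be treated as an assumption of this example:

static inline void i40e_vf_reenable_vector(struct i40e_hw *hw, int vector)
{
	u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
		  I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
		  (3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);

	wr32(hw, I40E_VFINT_DYN_CTLN1(vector), val);
}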
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: CORER */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: PFR */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
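Each 32-bit HLUT register above packs four 4-bit RSS queue indices (LUT0..LUT3 at shifts 0/8/16/24). A sketch that packs one register's worth of a byte-per-entry lookup table (the function name and the layout of 'lut' are illustrative assumptions):

static inline u32 i40e_vf_pack_hlut(const u8 *lut, int i)
{
	/* lut[] holds one queue index per byte; keep only 4 bits each */
	return ((u32)(lut[4 * i + 0] & 0xF) << I40E_VFQF_HLUT_LUT0_SHIFT) |
	       ((u32)(lut[4 * i + 1] & 0xF) << I40E_VFQF_HLUT_LUT1_SHIFT) |
	       ((u32)(lut[4 * i + 2] & 0xF) << I40E_VFQF_HLUT_LUT2_SHIFT) |
	       ((u32)(lut[4 * i + 3] & 0xF) << I40E_VFQF_HLUT_LUT3_SHIFT);
}
/* usage: wr32(hw, I40E_VFQF_HLUT(i), i40e_vf_pack_hlut(lut, i)); */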
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+#ifdef X722_SUPPORT
+
+#ifdef PF_DRIVER
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
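+/* Usage sketch (illustrative, not part of the register spec): the
+ * I40E_GLHMC_VF* macros above are parameterized register arrays, a base
+ * offset plus a 4-byte stride per index, valid for _i = 0..MAX_INDEX.
+ * Assuming the rd32() accessor and struct i40e_hw pointer from the
+ * surrounding base driver, with vf_idx an illustrative local:
+ *
+ *	u32 arp_cnt = rd32(hw, I40E_GLHMC_VFPEARPCNT(vf_idx));
+ */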
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
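+/* Usage sketch (illustrative): every field in this file is described by a
+ * *_SHIFT/*_MASK pair, where I40E_MASK() pre-shifts the mask into bit
+ * position, so testing a flag needs no extra shift while extracting a
+ * multi-bit field masks first and shifts down. With the I40E_GLNVM_ULD
+ * fields above, and rd32() assumed from the base driver's osdep layer:
+ *
+ *	u32 uld = rd32(hw, I40E_GLNVM_ULD);
+ *	bool corer_done = !!(uld & I40E_GLNVM_ULD_CORER_DONE_MASK);
+ *	u32 pe_done = (uld & I40E_GLNVM_ULD_PE_DONE_MASK) >>
+ *		      I40E_GLNVM_ULD_PE_DONE_SHIFT;
+ */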
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
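+/* Usage sketch (illustrative): the I40E_VFPE_* macros above take the VF
+ * number as the _VF argument; the inherited "_i=0...127" comments refer to
+ * that same argument, bounded by the matching *_MAX_INDEX. Assuming the
+ * wr32() accessor from the base driver, with vf_idx and cq_id hypothetical
+ * locals:
+ *
+ *	wr32(hw, I40E_VFPE_CQARM(vf_idx), cq_id);
+ */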
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
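+/* Illustrative sketch, not part of the original header: the GLPES counters
+ * above come in LO/HI pairs (a 32-bit LO dword plus a 16-bit HI field),
+ * i.e. each statistic is 48 bits split across two registers. A hypothetical
+ * reader helper, assuming rd32() from i40e_osdep.h:
+ */
+static inline u64
+i40e_read_glpes_stat48(struct i40e_hw *hw, u32 reg_lo, u32 reg_hi)
+{
+	u64 lo = rd32(hw, reg_lo);
+	u64 hi = rd32(hw, reg_hi) & 0xFFFF; /* HI fields above are 16 bits */
+
+	return (hi << 32) | lo;
+}
+/* e.g. i40e_read_glpes_stat48(hw, I40E_GLPES_PFUDPTXPKTSLO(pf),
+ *			       I40E_GLPES_PFUDPTXPKTSHI(pf));
+ */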
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_FD_MSK_MAX_INDEX 1
+#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
+#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
+#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
+#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
+#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
+#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
+#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
+#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
+#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
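+/* In the two-dimensional GLQF macros above, _j selects an 8-byte entry and
+ * _i (0 or 1) selects the low or high dword within it:
+ * address = base + (_j) * 8 + (_i) * 4.
+ */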
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT_MAX_INDEX 63
+#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
+#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
+#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
+#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
+#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
+#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
+#define I40E_GLQF_PIT_MAX_INDEX 23
+#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+#define I40E_GLQF_PIT_FSIZE_SHIFT 5
+#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
+#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
+#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
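+/* Illustrative sketch, not part of the original header: packing the three
+ * GLQF_PIT fields above into one register value. The helper name and
+ * arguments are hypothetical; wr32() is the MMIO write helper from
+ * i40e_osdep.h.
+ */
+static inline void
+i40e_write_glqf_pit(struct i40e_hw *hw, u32 idx, u32 src_off, u32 fsize,
+		    u32 dest_off)
+{
+	u32 val = ((src_off << I40E_GLQF_PIT_SOURCE_OFF_SHIFT) &
+		   I40E_GLQF_PIT_SOURCE_OFF_MASK) |
+		  ((fsize << I40E_GLQF_PIT_FSIZE_SHIFT) &
+		   I40E_GLQF_PIT_FSIZE_MASK) |
+		  ((dest_off << I40E_GLQF_PIT_DEST_OFF_SHIFT) &
+		   I40E_GLQF_PIT_DEST_OFF_MASK);
+
+	wr32(hw, I40E_GLQF_PIT(idx), val);
+}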
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
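+/* The eight OVERRIDE_ENA_n/REGION_n pairs above repeat on a fixed 4-bit
+ * stride, so generic accessors could be sketched as (hypothetical helpers,
+ * not part of this header):
+ *
+ *   #define I40E_PFQF_HREGION_ENA_SHIFT(n)    ((n) * 4)
+ *   #define I40E_PFQF_HREGION_REGION_SHIFT(n) ((n) * 4 + 1)
+ *
+ * e.g. I40E_PFQF_HREGION_REGION_SHIFT(3) == 13 matches
+ * I40E_PFQF_HREGION_REGION_3_SHIFT.
+ */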
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#endif /* PF_DRIVER */
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
+#endif /* X722_SUPPORT */
+#endif /* _I40E_REGISTER_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_status.h b/src/dpdk/drivers/net/i40e/base/i40e_status.h
new file mode 100644
index 00000000..5632ff2b
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_status.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_type.h b/src/dpdk/drivers/net/i40e/base/i40e_type.h
new file mode 100644
index 00000000..5349419f
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_type.h
@@ -0,0 +1,1674 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p);
+#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q);
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r);
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t);
+
+#ifndef LINUX_MACROS
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#endif /* LINUX_MACROS */
+
+#ifndef I40E_MASK
+/* I40E_MASK is a macro used on 32-bit registers */
+#define I40E_MASK(mask, shift) ((mask) << (shift))
+#endif
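+/* For example, isolating the FIELD_CNT field of GLQF_ORT (an illustrative
+ * sketch; it assumes the rd32() register accessor from i40e_osdep.h and a
+ * hypothetical local 'cnt'):
+ *
+ *   u32 reg = rd32(hw, I40E_GLQF_ORT(0));
+ *   u32 cnt = (reg & I40E_GLQF_ORT_FIELD_CNT_MASK) >>
+ *             I40E_GLQF_ORT_FIELD_CNT_SHIFT;
+ */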
+
+#define I40E_MAX_PF 16
+#define I40E_MAX_PF_VSI 64
+#define I40E_MAX_PF_QP 128
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* something less than 1 minute */
+#define I40E_HEARTBEAT_TIMEOUT (HZ * 50)
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Check whether address is multicast. */
+#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
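+/* Example (illustrative): both macros take a raw byte pointer, so a MAC
+ * address array can be passed directly. Note that I40E_IS_BROADCAST only
+ * inspects the first two octets:
+ *
+ *   u8 mac[I40E_ETH_LENGTH_OF_ADDRESS] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
+ *   if (I40E_IS_MULTICAST(mac))
+ *           ; // true: bit 0 of the first octet is set
+ */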
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS 6
+/* Data type manipulation macros. */
+#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
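+/* Worked example (R->count = 512): with next_to_clean = 10 and
+ * next_to_use = 500, the clean index has wrapped behind the use index, so
+ * 512 + 10 - 500 - 1 = 21 descriptors are unused; with next_to_clean = 500
+ * and next_to_use = 10, 500 - 10 - 1 = 489 are unused. One slot is always
+ * held back so a full ring is distinguishable from an empty one.
+ */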
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
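+/* For example, to trace link handling plus all admin queue traffic
+ * (illustrative sketch):
+ *
+ *   hw->debug_mask = I40E_DEBUG_LINK | I40E_DEBUG_AQ;
+ */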
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_STATUS 0xB2
+#define I40E_PCI_LINK_WIDTH 0x3F0
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED 0xF
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+#define I40E_MDIO_STCODE 0
+#define I40E_MDIO_OPCODE_ADDRESS 0
+#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_PHY_COM_REG_PAGE 0x1E
+#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
+#define I40E_PHY_LED_MANUAL_ON 0x100
+#define I40E_PHY_LED_PROV_REG_1 0xC430
+#define I40E_PHY_LED_MODE_MASK 0xFFFF
+#define I40E_PHY_LED_MODE_ORIG 0x80000000
+
+/* Memory types */
+enum i40e_memset_type {
+ I40E_NONDMA_MEM = 0,
+ I40E_DMA_MEM
+};
+
+/* Memcpy types */
+enum i40e_memcpy_type {
+ I40E_NONDMA_TO_NONDMA = 0,
+ I40E_NONDMA_TO_DMA,
+ I40E_DMA_TO_DMA,
+ I40E_DMA_TO_NONDMA
+};
+
+#ifdef X722_SUPPORT
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#endif
+#define I40E_FW_API_VERSION_MINOR_X710 0x0005
+
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+#ifdef X722_SUPPORT
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
+#endif
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_set_fc_aq_failures {
+ I40E_SET_FC_AQ_FAIL_NONE = 0,
+ I40E_SET_FC_AQ_FAIL_GET = 1,
+ I40E_SET_FC_AQ_FAIL_SET = 2,
+ I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+ I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+ u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
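+/* Example (illustrative): decoding module_type from a populated link_info:
+ *
+ *   struct i40e_link_status *ls = &hw->phy.link_info;
+ *   if (ls->module_type[0] == I40E_MODULE_TYPE_SFP &&
+ *       (ls->module_type[1] & I40E_MODULE_TYPE_10G_BASE_SR))
+ *           ; // SFP/SFP+ module advertising 10GBASE-SR compliance
+ */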
+
+enum i40e_aq_capabilities_phy_type {
+ I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
+ I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
+ I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+ I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
+ I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+ I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
+ I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
+ I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
+ I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
+ I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+ I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+ I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
+ I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
+ I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
+ I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+ I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+ I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+ I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
+ I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
+ I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+ I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ u32 phy_types;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+#ifdef X722_SUPPORT
+enum i40e_acpi_programming_method {
+ I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define I40E_WOL_SUPPORT_MASK 1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
+#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+
+#endif
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+ u64 wr_csr_prot;
+#ifdef X722_SUPPORT
+ bool apm_wol_support;
+ enum i40e_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
+#endif
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
+};
+
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+ I40E_NVMUPD_INVALID,
+ I40E_NVMUPD_READ_CON,
+ I40E_NVMUPD_READ_SNT,
+ I40E_NVMUPD_READ_LCB,
+ I40E_NVMUPD_READ_SA,
+ I40E_NVMUPD_WRITE_ERA,
+ I40E_NVMUPD_WRITE_CON,
+ I40E_NVMUPD_WRITE_SNT,
+ I40E_NVMUPD_WRITE_LCB,
+ I40E_NVMUPD_WRITE_SA,
+ I40E_NVMUPD_CSUM_CON,
+ I40E_NVMUPD_CSUM_SA,
+ I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
+};
+
+enum i40e_nvmupd_state {
+ I40E_NVMUPD_STATE_INIT,
+ I40E_NVMUPD_STATE_READING,
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code. Where is the right file?
+ */
+#define I40E_NVM_READ 0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_EXEC 0xf
+
+#define I40E_NVM_ADAPT_SHIFT 16
+#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA 4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+ u32 command;
+ u32 config;
+ u32 offset; /* in bytes */
+ u32 data_size; /* in bytes */
+ u8 data[1];
+};
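+/* Example (illustrative): composing the config word for a "read, start of
+ * transaction" request against a module pointer of 0x0B:
+ *
+ *   struct i40e_nvm_access cmd = {0};
+ *   cmd.command = I40E_NVM_READ;
+ *   cmd.config = (I40E_NVM_SNT << I40E_NVM_TRANS_SHIFT) |
+ *                (0x0B & I40E_NVM_MOD_PNT_MASK);
+ */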
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+#define I40E_TLV_STATUS_OPER 0x1
+#define I40E_TLV_STATUS_SYNC 0x2
+#define I40E_TLV_STATUS_ERR 0x4
+#define I40E_CEE_OPER_MAX_APPS 3
+#define I40E_APP_PROTOID_FCOE 0x8906
+#define I40E_APP_PROTOID_ISCSI 0x0cbc
+#define I40E_APP_PROTOID_FIP 0x8914
+#define I40E_APP_SEL_ETHTYPE 0x1
+#define I40E_APP_SEL_TCPIP 0x2
+#define I40E_CEE_APP_SEL_ETHTYPE 0x0
+#define I40E_CEE_APP_SEL_TCPIP 0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u8 dcbx_mode;
+#define I40E_DCBX_MODE_CEE 0x1
+#define I40E_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define I40E_DCBX_APPS_NON_WILLING 0x1
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct i40e_dcb_ets_config etscfg;
+ struct i40e_dcb_ets_config etsrec;
+ struct i40e_dcb_pfc_config pfc;
+ struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* state of nvm update process */
+ enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+#ifdef X722_SUPPORT
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
+#endif
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+ u64 flags;
+
+ /* debug mask */
+ u32 debug_mask;
+#ifndef I40E_NDIS_SUPPORT
+ char err_str[16];
+#endif /* I40E_NDIS_SUPPORT */
+};
+
+STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw)
+{
+#ifdef X722_SUPPORT
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
+#else
+ return hw->mac.type == I40E_MAC_VF;
+#endif
+}
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ I40E_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define I40E_RXD_QW0_FCOEINDX_SHIFT 0
+#define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ I40E_RXD_QW0_FCOEINDX_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+#ifdef X722_SUPPORT
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+#else
+ I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
+#endif
+
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
+#ifdef X722_SUPPORT
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+#else
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+#endif
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
+ I40E_RXD_QW1_STATUS_SHIFT)
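+/* Example (illustrative): testing the descriptor-done (DD) bit, where qw1
+ * holds wb.qword1.status_error_len after little-endian conversion:
+ *
+ *   u32 rx_status = (u32)((qw1 & I40E_RXD_QW1_STATUS_MASK) >>
+ *                         I40E_RXD_QW1_STATUS_SHIFT);
+ *   if (rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))
+ *           ; // hardware has written this descriptor back
+ */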
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST_SHIFT
+#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_PACKET_TYPE_UNICAST 0
+#define I40E_RXD_PACKET_TYPE_MULTICAST 1
+#define I40E_RXD_PACKET_TYPE_BROADCAST 2
+#define I40E_RXD_PACKET_TYPE_MIRRORED 3
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
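+/* Example (illustrative): extracting the 8-bit ptype from qword1 and
+ * decoding it; 'ptype_lookup' is a hypothetical decode table:
+ *
+ *   u8 ptype = (u8)((qw1 & I40E_RXD_QW1_PTYPE_MASK) >>
+ *                   I40E_RXD_QW1_PTYPE_SHIFT);
+ *   struct i40e_rx_ptype_decoded decoded = ptype_lookup[ptype];
+ *   if (decoded.known && decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
+ *           ; // IP packet; outer_ip_ver selects IPv4 vs IPv6
+ */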
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define I40E_RX_PTYPE_SHIFT 56
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
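+/* Example (illustrative): recovering the packet buffer length from qword1:
+ *
+ *   u16 rx_len = (u16)((qw1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ *                      I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
+ */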
+
+#define I40E_RXD_QW1_NEXTP_SHIFT 38
+#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
+
+#define I40E_RXD_QW2_EXT_STATUS_SHIFT 0
+#define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ I40E_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define I40E_RXD_QW2_L2TAG2_SHIFT 0
+#define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT)
+
+#define I40E_RXD_QW2_L2TAG3_SHIFT 16
+#define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT)
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define I40E_TWO_BIT_MASK 0x3
+#define I40E_THREE_BIT_MASK 0x7
+#define I40E_FOUR_BIT_MASK 0xF
+#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
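+/* Example (illustrative): composing cmd_type_offset_bsz for a single-buffer
+ * data descriptor, assuming the CPU_TO_LE64() wrapper from i40e_osdep.h and
+ * a hypothetical buffer length 'size':
+ *
+ *   u64 qw1 = I40E_TX_DESC_DTYPE_DATA |
+ *             ((u64)(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) <<
+ *              I40E_TXD_QW1_CMD_SHIFT) |
+ *             ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+ *   txd->cmd_type_offset_bsz = CPU_TO_LE64(qw1);
+ */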
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#ifdef X722_SUPPORT
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
+#endif
+struct i40e_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_NOP_QW1_CMD_SHIFT 4
+#define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT)
+
+enum i40e_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_NOP_DESC_EOP_SHIFT = 0,
+ I40E_TX_NOP_DESC_RS_SHIFT = 1,
+ I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+#ifdef X722_SUPPORT
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+#else
+ /* Note: Values 0-30 are reserved for future use */
+#endif
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+#ifdef X722_SUPPORT
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+#else
+ /* Note: Value 32 is reserved for future use */
+#endif
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+#ifdef X722_SUPPORT
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+#else
+ /* Note: Values 37-40 are reserved for future use */
+#endif
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+#ifdef X722_SUPPORT
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+#endif
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#ifdef X722_SUPPORT
+
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+#endif
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define I40E_SR_OPTION_ROM_PTR 0x05
+#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define I40E_SR_RO_PCIE_LCB_PTR 0x0A
+#define I40E_SR_EMP_IMAGE_PTR 0x0B
+#define I40E_SR_PE_IMAGE_PTR 0x0C
+#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define I40E_SR_MNG_CONFIG_PTR 0x0E
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_PBA_FLAGS 0x15
+#define I40E_SR_PBA_BLOCK_PTR 0x16
+#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_NVM_OEM_VER_OFF 0x83
+#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define I40E_SR_NVM_MAP_VERSION 0x29
+#define I40E_SR_NVM_IMAGE_VERSION 0x2A
+#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PXE_SETUP_PTR 0x30
+#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
+#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_BUF_ALIGNMENT 4096
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
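+/* A minimal verification sketch of that rule (illustrative only; the
+ * driver's NVM code additionally excludes a few modules, e.g. VPD,
+ * from the sum):
+ *
+ *    static bool i40e_sr_checksum_ok(const u16 *words, u32 count)
+ *    {
+ *        u16 sum = 0;
+ *        u32 i;
+ *
+ *        for (i = 0; i < count; i++)
+ *            sum += words[i];    // checksum word included
+ *        return sum == I40E_SR_SW_CHECKSUM_BASE;
+ *    }
+ */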
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
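+/* In both enums above, the encoded value is a power-of-two multiplier of
+ * the matching base size, so the actual count is base << value, e.g.
+ * (sketch):
+ *
+ *    u32 filters = I40E_HASH_FILTER_BASE_SIZE << I40E_HASH_FILTER_SIZE_4K;
+ *    u32 contexts = I40E_DMA_CNTX_BASE_SIZE << I40E_DMA_CNTX_SIZE_2K;
+ *    // filters == 4096, contexts == 2048
+ */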
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold per-PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0xD
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate RAM */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate RAM Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK 0xFF
+#define I40E_ALT_BW_RELATIVE_MASK 0x40000000
+#define I40E_ALT_BW_VALID_MASK 0x80000000
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+#endif /* _I40E_TYPE_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h b/src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h
new file mode 100644
index 00000000..fd51ec32
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h
@@ -0,0 +1,438 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
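+/* A hedged sketch of that order, using a hypothetical
+ * i40e_vf_send(hw, opcode, buf, len) helper (real drivers issue these
+ * through the admin queue with i40e_aqc_opc_send_msg_to_pf):
+ *
+ *    i40e_vf_send(hw, I40E_VIRTCHNL_OP_VERSION, &ver, sizeof(ver));
+ *    i40e_vf_send(hw, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+ *    // ...poll VFGEN_RSTAT until reset completes, reinit the admin queue...
+ *    i40e_vf_send(hw, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, &caps, sizeof(caps));
+ *    i40e_vf_send(hw, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, qcfg, qlen);
+ *    i40e_vf_send(hw, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, imap, ilen);
+ *    i40e_vf_send(hw, I40E_VIRTCHNL_OP_ENABLE_QUEUES, &qsel, sizeof(qsel));
+ */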
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF = 2,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+ I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+ I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ I40E_VIRTCHNL_OP_GET_STATS = 15,
+ I40E_VIRTCHNL_OP_FCOE = 16,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+#ifdef I40E_SOL_VF_SUPPORT
+ I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
+#endif
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum i40e_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
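+/* Sketch of the rule above (pf_ver is the PF reply; the log helper is a
+ * placeholder):
+ *
+ *    if (pf_ver.major != I40E_VIRTCHNL_VERSION_MAJOR)
+ *        return -1;            // VF cannot operate
+ *    if (pf_ver.minor != I40E_VIRTCHNL_VERSION_MINOR)
+ *        log_warn("virtchnl minor version mismatch");
+ */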
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
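+/* vsi_res[1] (like qpair[1], vecmap[1], list[1], vlan_id[1], key[1] and
+ * lut[1] below) is a pre-C99 variable-length tail; the message buffer is
+ * sized for the real element count, e.g. (sketch):
+ *
+ *    len = sizeof(struct i40e_virtchnl_vf_resource) +
+ *          (num_vsis - 1) * sizeof(struct i40e_virtchnl_vsi_resource);
+ */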
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
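+/* For example, a VF enabling its first num_qp queue pairs could fill the
+ * struct as follows (sketch; num_qp <= 16 per the note above):
+ *
+ *    struct i40e_virtchnl_queue_select qsel = {0};
+ *
+ *    qsel.vsi_id = vsi_id;
+ *    qsel.rx_queues = (1U << num_qp) - 1;    // bits 0..num_qp-1
+ *    qsel.tx_queues = qsel.rx_queues;
+ */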
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+#ifdef I40E_SOL_VF_SUPPORT
+/* I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
+ * VF sends this message to get the default MTU and list of additional ethernet
+ * addresses it is allowed to use.
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_addnl_solaris_config with zero or more
+ * i40e_virtchnl_ether_addr structures.
+ *
+ * It is expected that this operation will only ever be needed for Solaris VFs
+ * running under a Solaris PF.
+ */
+struct i40e_virtchnl_addnl_solaris_config {
+ u16 default_mtu;
+ struct i40e_virtchnl_ether_addr_list al;
+};
+
+#endif
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the rss fields in
+ * the vf resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
+#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
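+/* A minimal polling sketch based on the description above (the register
+ * read and delay helpers are placeholders for the driver's own):
+ *
+ *    u32 rstat;
+ *
+ *    do {
+ *        i40e_msec_delay(10);
+ *        rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+ *                I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *        // DEADBEEF & 0x3 == I40E_VFR_UNKNOWN (3) while in reset
+ *    } while (rstat != I40E_VFR_COMPLETED && rstat != I40E_VFR_VFACTIVE);
+ */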
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
new file mode 100644
index 00000000..3f9f05ee
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
@@ -0,0 +1,9498 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include <rte_string_fns.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_alarm.h>
+#include <rte_dev.h>
+#include <rte_eth_ctrl.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
+#include "base/i40e_register.h"
+#include "base/i40e_dcb.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_pf.h"
+#include "i40e_regs.h"
+
+#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
+#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
+
+#define I40E_CLEAR_PXE_WAIT_MS 200
+
+/* Maximum number of capability elements */
+#define I40E_MAX_CAP_ELE_NUM 128
+
+/* Wait count and interval */
+#define I40E_CHK_Q_ENA_COUNT 1000
+#define I40E_CHK_Q_ENA_INTERVAL_US 1000
+
+/* Maximum number of VSIs */
+#define I40E_MAX_NUM_VSIS (384UL)
+
+#define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
+
+/* Flow control default timer */
+#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
+
+/* Flow control default high water */
+#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)
+
+/* Flow control default low water */
+#define I40E_DEFAULT_LOW_WATER (0x1A40/1024)
+
+/* Flow control enable fwd bit */
+#define I40E_PRTMAC_FWD_CTRL 0x00000001
+
+/* Receive Packet Buffer size */
+#define I40E_RXPBSIZE (968 * 1024)
+
+/* Kilobytes shift */
+#define I40E_KILOSHIFT 10
+
+/* Receive Average Packet Size in bytes */
+#define I40E_PACKET_AVERAGE_SIZE 128
+
+/* Mask of PF interrupt causes */
+#define I40E_PFINT_ICR0_ENA_MASK ( \
+ I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
+ I40E_PFINT_ICR0_ENA_GRST_MASK | \
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
+ I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
+ I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
+ I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
+ I40E_PFINT_ICR0_ENA_VFLR_MASK | \
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
+
+#define I40E_FLOW_TYPES ( \
+ (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+ (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
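+/* A flow type is supported iff its bit is set in the mask, e.g. (sketch):
+ *
+ *    if (!(I40E_FLOW_TYPES & (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)))
+ *        return -EINVAL;
+ */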
+
+/* Additional timesync values. */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
+#define I40E_PRTTSYN_TSYNENA 0x80000000
+#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
+#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
+#define I40E_MAX_PERCENT 100
+#define I40E_DEFAULT_DCB_APP_NUM 1
+#define I40E_DEFAULT_DCB_APP_PRIO 3
+
+#define I40E_INSET_NONE 0x0000000000000000ULL
+
+/* bit 0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
+/**
+ * Below are values, suggested by silicon experts, for writing
+ * registers that are not otherwise exposed.
+ */
+/* Destination MAC address */
+#define I40E_REG_INSET_L2_DMAC 0xE000000000000000ULL
+/* Source MAC address */
+#define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL
+/* Outer (S-Tag) VLAN tag in the outer L2 header */
+#define I40E_REG_INSET_L2_OUTER_VLAN 0x0200000000000000ULL
+/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
+#define I40E_REG_INSET_L2_INNER_VLAN 0x0080000000000000ULL
+/* Single VLAN tag in the inner L2 header */
+#define I40E_REG_INSET_TUNNEL_VLAN 0x0100000000000000ULL
+/* Source IPv4 address */
+#define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL
+/* Destination IPv4 address */
+#define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL
+/* IPv4 Type of Service (TOS) */
+#define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL
+/* IPv4 Protocol */
+#define I40E_REG_INSET_L3_IP4_PROTO 0x0004000000000000ULL
+/* IPv4 Time to Live */
+#define I40E_REG_INSET_L3_IP4_TTL 0x0004000000000000ULL
+/* Source IPv6 address */
+#define I40E_REG_INSET_L3_SRC_IP6 0x0007F80000000000ULL
+/* Destination IPv6 address */
+#define I40E_REG_INSET_L3_DST_IP6 0x000007F800000000ULL
+/* IPv6 Traffic Class (TC) */
+#define I40E_REG_INSET_L3_IP6_TC 0x0040000000000000ULL
+/* IPv6 Next Header */
+#define I40E_REG_INSET_L3_IP6_NEXT_HDR 0x0008000000000000ULL
+/* IPv6 Hop Limit */
+#define I40E_REG_INSET_L3_IP6_HOP_LIMIT 0x0008000000000000ULL
+/* Source L4 port */
+#define I40E_REG_INSET_L4_SRC_PORT 0x0000000400000000ULL
+/* Destination L4 port */
+#define I40E_REG_INSET_L4_DST_PORT 0x0000000200000000ULL
+/* SCTP verification tag */
+#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG 0x0000000180000000ULL
+/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/
+#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC 0x0000000001C00000ULL
+/* Source port of tunneling UDP */
+#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT 0x0000000000200000ULL
+/* Destination port of tunneling UDP */
+#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT 0x0000000000100000ULL
+/* UDP Tunneling ID, NVGRE/GRE key */
+#define I40E_REG_INSET_TUNNEL_ID 0x00000000000C0000ULL
+/* Last ether type */
+#define I40E_REG_INSET_LAST_ETHER_TYPE 0x0000000000004000ULL
+/* Tunneling outer destination IPv4 address */
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP4 0x00000000000000C0ULL
+/* Tunneling outer destination IPv6 address */
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP6 0x0000000000003FC0ULL
+/* 1st word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1 0x0000000000002000ULL
+/* 2nd word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2 0x0000000000001000ULL
+/* 3rd word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3 0x0000000000000800ULL
+/* 4th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4 0x0000000000000400ULL
+/* 5th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5 0x0000000000000200ULL
+/* 6th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6 0x0000000000000100ULL
+/* 7th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7 0x0000000000000080ULL
+/* 8th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8 0x0000000000000040ULL
+/* all 8 words flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS 0x0000000000003FC0ULL
+#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL
+
+#define I40E_TRANSLATE_INSET 0
+#define I40E_TRANSLATE_REG 1
+
+#define I40E_INSET_IPV4_TOS_MASK 0x0009FF00UL
+#define I40E_INSET_IPv4_TTL_MASK 0x000D00FFUL
+#define I40E_INSET_IPV4_PROTO_MASK 0x000DFF00UL
+#define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL
+#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
+#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
+
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
+ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+/* PCI offset for querying capability */
+#define PCI_DEV_CAP_REG 0xA4
+/* PCI offset for enabling/disabling Extended Tag */
+#define PCI_DEV_CTRL_REG 0xA8
+/* Bit mask of Extended Tag capability */
+#define PCI_DEV_CAP_EXT_TAG_MASK 0x20
+/* Bit shift of Extended Tag enable/disable */
+#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
+/* Bit mask of Extended Tag enable/disable */
+#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
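+/* A sketch of how these offsets could be used to turn on Extended Tag via
+ * PCI config space (error handling omitted; rte_eal_pci_read_config() and
+ * rte_eal_pci_write_config() are the EAL config-space accessors):
+ *
+ *    uint32_t buf = 0;
+ *
+ *    rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG);
+ *    if (buf & PCI_DEV_CAP_EXT_TAG_MASK) {
+ *        rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
+ *                                PCI_DEV_CTRL_REG);
+ *        buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
+ *        rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf),
+ *                                 PCI_DEV_CTRL_REG);
+ *    }
+ */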
+
+static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
+static int i40e_dev_configure(struct rte_eth_dev *dev);
+static int i40e_dev_start(struct rte_eth_dev *dev);
+static void i40e_dev_stop(struct rte_eth_dev *dev);
+static void i40e_dev_close(struct rte_eth_dev *dev);
+static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
+static void i40e_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
+static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
+static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+static void i40e_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id,
+ int on);
+static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid);
+static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ uint16_t queue,
+ int on);
+static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
+static int i40e_dev_led_on(struct rte_eth_dev *dev);
+static int i40e_dev_led_off(struct rte_eth_dev *dev);
+static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf);
+static void i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
+static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
+static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+static int i40e_get_cap(struct i40e_hw *hw);
+static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
+static int i40e_pf_setup(struct i40e_pf *pf);
+static int i40e_dev_rxtx_init(struct i40e_pf *pf);
+static int i40e_vmdq_setup(struct rte_eth_dev *dev);
+static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
+static int i40e_dcb_setup(struct rte_eth_dev *dev);
+static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
+ bool offset_loaded, uint64_t *offset, uint64_t *stat);
+static void i40e_stat_update_48(struct i40e_hw *hw,
+ uint32_t hireg,
+ uint32_t loreg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat);
+static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
+static void i40e_dev_interrupt_handler(
+ __rte_unused struct rte_intr_handle *handle, void *param);
+static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
+ uint32_t base, uint32_t num);
+static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
+static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
+ uint32_t base);
+static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
+ uint16_t num);
+static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
+static int i40e_veb_release(struct i40e_veb *veb);
+static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
+ struct i40e_vsi *vsi);
+static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
+static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
+static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num,
+ struct ether_addr *addr);
+static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num,
+ uint16_t vlan);
+static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
+static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static void i40e_filter_input_set_init(struct i40e_pf *pf);
+static int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
+static void i40e_configure_registers(struct i40e_hw *hw);
+static void i40e_hw_init(struct rte_eth_dev *dev);
+static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
+static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t sw_id, uint8_t on);
+static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
+
+static int i40e_timesync_enable(struct rte_eth_dev *dev);
+static int i40e_timesync_disable(struct rte_eth_dev *dev);
+static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
+
+static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+
+static int i40e_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int i40e_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+
+static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+
+static int i40e_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+
+static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
+
+static int i40e_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+
+static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+
+static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static const struct rte_pci_id pci_id_i40e_map[] = {
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_I_X722) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops i40e_eth_dev_ops = {
+ .dev_configure = i40e_dev_configure,
+ .dev_start = i40e_dev_start,
+ .dev_stop = i40e_dev_stop,
+ .dev_close = i40e_dev_close,
+ .promiscuous_enable = i40e_dev_promiscuous_enable,
+ .promiscuous_disable = i40e_dev_promiscuous_disable,
+ .allmulticast_enable = i40e_dev_allmulticast_enable,
+ .allmulticast_disable = i40e_dev_allmulticast_disable,
+ .dev_set_link_up = i40e_dev_set_link_up,
+ .dev_set_link_down = i40e_dev_set_link_down,
+ .link_update = i40e_dev_link_update,
+ .stats_get = i40e_dev_stats_get,
+ .xstats_get = i40e_dev_xstats_get,
+ .xstats_get_names = i40e_dev_xstats_get_names,
+ .stats_reset = i40e_dev_stats_reset,
+ .xstats_reset = i40e_dev_stats_reset,
+ .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
+ .dev_infos_get = i40e_dev_info_get,
+ .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
+ .vlan_filter_set = i40e_vlan_filter_set,
+ .vlan_tpid_set = i40e_vlan_tpid_set,
+ .vlan_offload_set = i40e_vlan_offload_set,
+ .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
+ .vlan_pvid_set = i40e_vlan_pvid_set,
+ .rx_queue_start = i40e_dev_rx_queue_start,
+ .rx_queue_stop = i40e_dev_rx_queue_stop,
+ .tx_queue_start = i40e_dev_tx_queue_start,
+ .tx_queue_stop = i40e_dev_tx_queue_stop,
+ .rx_queue_setup = i40e_dev_rx_queue_setup,
+ .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
+ .rx_queue_release = i40e_dev_rx_queue_release,
+ .rx_queue_count = i40e_dev_rx_queue_count,
+ .rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .tx_queue_setup = i40e_dev_tx_queue_setup,
+ .tx_queue_release = i40e_dev_tx_queue_release,
+ .dev_led_on = i40e_dev_led_on,
+ .dev_led_off = i40e_dev_led_off,
+ .flow_ctrl_get = i40e_flow_ctrl_get,
+ .flow_ctrl_set = i40e_flow_ctrl_set,
+ .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
+ .mac_addr_add = i40e_macaddr_add,
+ .mac_addr_remove = i40e_macaddr_remove,
+ .reta_update = i40e_dev_rss_reta_update,
+ .reta_query = i40e_dev_rss_reta_query,
+ .rss_hash_update = i40e_dev_rss_hash_update,
+ .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
+ .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del,
+ .filter_ctrl = i40e_dev_filter_ctrl,
+ .rxq_info_get = i40e_rxq_info_get,
+ .txq_info_get = i40e_txq_info_get,
+ .mirror_rule_set = i40e_mirror_rule_set,
+ .mirror_rule_reset = i40e_mirror_rule_reset,
+ .timesync_enable = i40e_timesync_enable,
+ .timesync_disable = i40e_timesync_disable,
+ .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp,
+ .get_dcb_info = i40e_dev_get_dcb_info,
+ .timesync_adjust_time = i40e_timesync_adjust_time,
+ .timesync_read_time = i40e_timesync_read_time,
+ .timesync_write_time = i40e_timesync_write_time,
+ .get_reg = i40e_get_regs,
+ .get_eeprom_length = i40e_get_eeprom_length,
+ .get_eeprom = i40e_get_eeprom,
+ .mac_addr_set = i40e_set_default_mac_addr,
+ .mtu_set = i40e_dev_mtu_set,
+};
+
+/* Store statistics names and their offsets in the stats structure */
+struct rte_i40e_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
+ {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
+ {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
+ {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
+ {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
+ {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
+ rx_unknown_protocol)},
+ {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+ {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+ {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+ {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
+};
+
+#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
+ sizeof(rte_i40e_stats_strings[0]))
+
+static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
+ {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
+ tx_dropped_link_down)},
+ {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
+ {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
+ illegal_bytes)},
+ {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
+ {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
+ mac_local_faults)},
+ {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
+ mac_remote_faults)},
+ {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
+ rx_length_errors)},
+ {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
+ {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
+ {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
+ {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
+ {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
+ {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_127)},
+ {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_255)},
+ {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_511)},
+ {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_1023)},
+ {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_1522)},
+ {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_big)},
+ {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
+ rx_undersize)},
+ {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
+ rx_oversize)},
+ {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
+ mac_short_packet_dropped)},
+ {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
+ rx_fragments)},
+ {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
+ {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
+ {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_127)},
+ {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_255)},
+ {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_511)},
+ {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_1023)},
+ {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_1522)},
+ {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_big)},
+ {"rx_flow_director_atr_match_packets",
+ offsetof(struct i40e_hw_port_stats, fd_atr_match)},
+ {"rx_flow_director_sb_match_packets",
+ offsetof(struct i40e_hw_port_stats, fd_sb_match)},
+ {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
+ tx_lpi_status)},
+ {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
+ rx_lpi_status)},
+ {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
+ tx_lpi_count)},
+ {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
+ rx_lpi_count)},
+};
+
+#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
+ sizeof(rte_i40e_hw_port_strings[0]))
+
+static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
+ {"xon_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_rx)},
+ {"xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xoff_rx)},
+};
+
+#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
+ sizeof(rte_i40e_rxq_prio_strings[0]))
+
+static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
+ {"xon_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_tx)},
+ {"xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xoff_tx)},
+ {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_2_xoff)},
+};
+
+#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
+ sizeof(rte_i40e_txq_prio_strings[0]))
+
+static struct eth_driver rte_i40e_pmd = {
+ .pci_drv = {
+ .name = "rte_i40e_pmd",
+ .id_table = pci_id_i40e_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_DETACHABLE,
+ },
+ .eth_dev_init = eth_i40e_dev_init,
+ .eth_dev_uninit = eth_i40e_dev_uninit,
+ .dev_private_size = sizeof(struct i40e_adapter),
+};
+
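+/* The two helpers below copy struct rte_eth_link (which fits in 64 bits)
+ * atomically: rte_atomic64_cmpset() is given the destination's current
+ * value as the expected value, so the copy only fails (-1) if another
+ * thread updates the destination between the read and the swap.
+ */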
+static inline int
+rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of PCI i40e devices.
+ */
+static int
+rte_i40e_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ rte_eth_driver_register(&rte_i40e_pmd);
+
+ return 0;
+}
+
+static struct rte_driver rte_i40e_driver = {
+ .type = PMD_PDEV,
+ .init = rte_i40e_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_i40e_driver, i40e);
+DRIVER_REGISTER_PCI_TABLE(i40e, pci_id_i40e_map);
+
+/*
+ * Initialize registers for flexible payload; these should be set by the
+ * NVM. This code should be removed once the NVM is fixed.
+ */
+#ifndef I40E_GLQF_ORT
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
+#endif
+#ifndef I40E_GLQF_PIT
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
+#endif
+
+static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
+
+ /* GLQF_PIT Registers */
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
+}
+
+#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
+
+/*
+ * Add an ethertype filter to drop all flow control frames transmitted
+ * from VSIs.
+ */
+static void
+i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ int ret;
+
+ ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
+ I40E_FLOW_CONTROL_ETHERTYPE, flags,
+ pf->main_vsi_seid, 0,
+ TRUE, NULL, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control"
+ " frames from VSIs.");
+}
+
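+/* floating_veb_list accepts a semicolon-separated list of VF indexes and
+ * index ranges, e.g. a devargs value such as (illustrative):
+ *
+ *    enable_floating_veb=1,floating_veb_list=1;3-5;8
+ *
+ * marks VFs 1, 3, 4, 5 and 8 as attached to the floating VEB.
+ */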
+static int
+floating_veb_list_handler(__rte_unused const char *key,
+ const char *floating_veb_value,
+ void *opaque)
+{
+ int idx = 0;
+ unsigned int count = 0;
+ char *end = NULL;
+ int min, max;
+ bool *vf_floating_veb = opaque;
+
+ while (isblank(*floating_veb_value))
+ floating_veb_value++;
+
+ /* Reset floating VEB configuration for VFs */
+ for (idx = 0; idx < I40E_MAX_VF; idx++)
+ vf_floating_veb[idx] = false;
+
+ min = I40E_MAX_VF;
+ do {
+ while (isblank(*floating_veb_value))
+ floating_veb_value++;
+ if (*floating_veb_value == '\0')
+ return -1;
+ errno = 0;
+ idx = strtoul(floating_veb_value, &end, 10);
+ if (errno || end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ';') || (*end == '\0')) {
+ max = idx;
+ if (min == I40E_MAX_VF)
+ min = idx;
+ if (max >= I40E_MAX_VF)
+ max = I40E_MAX_VF - 1;
+ for (idx = min; idx <= max; idx++) {
+ vf_floating_veb[idx] = true;
+ count++;
+ }
+ min = I40E_MAX_VF;
+ } else {
+ return -1;
+ }
+ floating_veb_value = end + 1;
+ } while (*end != '\0');
+
+ if (count == 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+config_vf_floating_veb(struct rte_devargs *devargs,
+ uint16_t floating_veb,
+ bool *vf_floating_veb)
+{
+ struct rte_kvargs *kvlist;
+ int i;
+ const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
+
+ if (!floating_veb)
+ return;
+ /* All the VFs attach to the floating VEB by default
+ * when the floating VEB is enabled.
+ */
+ for (i = 0; i < I40E_MAX_VF; i++)
+ vf_floating_veb[i] = true;
+
+ if (devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return;
+
+ if (!rte_kvargs_count(kvlist, floating_veb_list)) {
+ rte_kvargs_free(kvlist);
+ return;
+ }
+ /* When the floating_veb_list parameter exists, all the VFs
+ * attach to the legacy VEB first and are then moved to the
+ * floating VEB according to the floating_veb_list.
+ */
+ if (rte_kvargs_process(kvlist, floating_veb_list,
+ floating_veb_list_handler,
+ vf_floating_veb) < 0) {
+ rte_kvargs_free(kvlist);
+ return;
+ }
+ rte_kvargs_free(kvlist);
+}
+
+static int
+i40e_check_floating_handler(__rte_unused const char *key,
+ const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+is_floating_veb_supported(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, floating_veb_key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ /* Floating VEB is enabled when there's key-value:
+ * enable_floating_veb=1
+ */
+ if (rte_kvargs_process(kvlist, floating_veb_key,
+ i40e_check_floating_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static void
+config_floating_veb(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
+
+ if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
+ pf->floating_veb = is_floating_veb_supported(pci_dev->devargs);
+ config_vf_floating_veb(pci_dev->devargs, pf->floating_veb,
+ pf->floating_veb_list);
+ } else {
+ pf->floating_veb = false;
+ }
+}
+
+static int
+eth_i40e_dev_init(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ int ret;
+ uint32_t len;
+ uint8_t aq_fail = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev->dev_ops = &i40e_eth_dev_ops;
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+
+ /* For secondary processes, we don't initialise any further as the
+ * primary has already done this work. Only check whether we need
+ * different RX and TX functions.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ i40e_set_rx_function(dev);
+ i40e_set_tx_function(dev);
+ return 0;
+ }
+ pci_dev = dev->pci_dev;
+
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ pf->adapter->eth_dev = dev;
+ pf->dev_data = dev->data;
+
+ hw->back = I40E_PF_TO_ADAPTER(pf);
+ hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
+ if (!hw->hw_addr) {
+ PMD_INIT_LOG(ERR, "Hardware is not available, "
+ "as address is NULL");
+ return -ENODEV;
+ }
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->adapter_stopped = 0;
+
+ /* Make sure all is clean before doing PF reset */
+ i40e_clear_hw(hw);
+
+ /* Initialize the hardware */
+ i40e_hw_init(dev);
+
+ /* Reset here to make sure all is clean for each PF */
+ ret = i40e_pf_reset(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
+ return ret;
+ }
+
+ /* Initialize the shared code (base driver) */
+ ret = i40e_init_shared_code(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
+ return ret;
+ }
+
+ /*
+ * To work around the NVM issue, initialize the registers
+ * for flexible payload in software.
+ * This should be removed once the issue is fixed in NVM.
+ */
+ i40e_flex_payload_reg_init(hw);
+
+ /* Initialize the input set for filters (hash and fd) to default value */
+ i40e_filter_input_set_init(pf);
+
+ /* Initialize the parameters for adminq */
+ i40e_init_adminq_parameter(hw);
+ ret = i40e_init_adminq(hw);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
+ return -EIO;
+ }
+ PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack);
+
+ /* Need the special FW version to support floating VEB */
+ config_floating_veb(dev);
+ /* Clear PXE mode */
+ i40e_clear_pxe_mode(hw);
+
+ /*
+ * On X710, performance is far below expectation on recent firmware
+ * versions, and the fix may not be integrated in the next firmware
+ * release. Work around it in the driver by modifying the initial
+ * values of 3 internal-only registers. The workaround can be removed
+ * once it is fixed in firmware.
+ */
+ i40e_configure_registers(hw);
+
+ /* Get hw capabilities */
+ ret = i40e_get_cap(hw);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
+ goto err_get_capabilities;
+ }
+
+ /* Initialize parameters for PF */
+ ret = i40e_pf_parameter_init(dev);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
+ goto err_parameter_init;
+ }
+
+ /* Initialize the queue management */
+ ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to init queue pool");
+ goto err_qp_pool_init;
+ }
+ ret = i40e_res_pool_init(&pf->msix_pool, 1,
+ hw->func_caps.num_msix_vectors - 1);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
+ goto err_msix_pool_init;
+ }
+
+ /* Initialize lan hmc */
+ ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
+ goto err_init_lan_hmc;
+ }
+
+ /* Configure lan hmc */
+ ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
+ goto err_configure_lan_hmc;
+ }
+
+ /* Get and check the mac address */
+ i40e_get_mac_addr(hw, hw->mac.addr);
+ if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "mac address is not valid");
+ ret = -EIO;
+ goto err_get_mac_addr;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.addr,
+ (struct ether_addr *) hw->mac.perm_addr);
+
+ /* Disable flow control */
+ hw->fc.requested_mode = I40E_FC_NONE;
+ i40e_set_fc(hw, &aq_fail, TRUE);
+
+ /* Set the global registers with default ether type value */
+ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to set the default outer "
+ "VLAN ether type");
+ goto err_setup_pf_switch;
+ }
+
+ /* PF setup, which includes VSI setup */
+ ret = i40e_pf_setup(pf);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
+ goto err_setup_pf_switch;
+ }
+
+ /* reset all stats of the device, including pf and main vsi */
+ i40e_dev_stats_reset(dev);
+
+ vsi = pf->main_vsi;
+
+ /* Disable double vlan by default */
+ i40e_vsi_config_double_vlan(vsi, FALSE);
+
+ if (!vsi->max_macaddrs)
+ len = ETHER_ADDR_LEN;
+ else
+ len = ETHER_ADDR_LEN * vsi->max_macaddrs;
+
+ /* Should be after VSI initialized */
+ dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
+ if (!dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR, "Failed to allocated memory "
+ "for storing mac address");
+ goto err_mac_alloc;
+ }
+ ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
+ &dev->data->mac_addrs[0]);
+
+ /* initialize pf host driver to setup SRIOV resource if applicable */
+ i40e_pf_host_init(dev);
+
+ /* register callback func to eal lib */
+ rte_intr_callback_register(&(pci_dev->intr_handle),
+ i40e_dev_interrupt_handler, (void *)dev);
+
+ /* configure and enable device interrupt */
+ i40e_pf_config_irq0(hw, TRUE);
+ i40e_pf_enable_irq0(hw);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&(pci_dev->intr_handle));
+ /*
+ * Add an ethertype filter to drop all flow control frames transmitted
+ * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
+ * frames to wire.
+ */
+ i40e_add_tx_flow_control_drop_filter(pf);
+
+ /* Set the max frame size to 0x2600 by default,
+ * in case other drivers changed the default value.
+ */
+ i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
+
+ /* initialize mirror rule list */
+ TAILQ_INIT(&pf->mirror_list);
+
+ /* Init dcb to sw mode by default */
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(INFO, "Failed to init dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+
+ return 0;
+
+err_mac_alloc:
+ i40e_vsi_release(pf->main_vsi);
+err_setup_pf_switch:
+err_get_mac_addr:
+err_configure_lan_hmc:
+ (void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+ i40e_res_pool_destroy(&pf->msix_pool);
+err_msix_pool_init:
+ i40e_res_pool_destroy(&pf->qp_pool);
+err_qp_pool_init:
+err_parameter_init:
+err_get_capabilities:
+ (void)i40e_shutdown_adminq(hw);
+
+ return ret;
+}
+
+static int
+eth_i40e_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct i40e_hw *hw;
+ struct i40e_filter_control_settings settings;
+ int ret;
+ uint8_t aq_fail = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = dev->pci_dev;
+
+ if (hw->adapter_stopped == 0)
+ i40e_dev_close(dev);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* Disable LLDP */
+ ret = i40e_aq_stop_lldp(hw, true, NULL);
+ if (ret != I40E_SUCCESS) /* Its failure can be ignored */
+ PMD_INIT_LOG(INFO, "Failed to stop lldp");
+
+ /* Clear PXE mode */
+ i40e_clear_pxe_mode(hw);
+
+ /* Unconfigure filter control */
+ memset(&settings, 0, sizeof(settings));
+ ret = i40e_set_filter_control(hw, &settings);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
+ ret);
+
+ /* Disable flow control */
+ hw->fc.requested_mode = I40E_FC_NONE;
+ i40e_set_fc(hw, &aq_fail, TRUE);
+
+ /* uninitialize pf host driver */
+ i40e_pf_host_uninit(dev);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&(pci_dev->intr_handle));
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(&(pci_dev->intr_handle),
+ i40e_dev_interrupt_handler, (void *)dev);
+
+ return 0;
+}
+
+static int
+i40e_dev_configure(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ int i, ret;
+
+ /* Initialize to TRUE. If any Rx queue fails to meet the bulk
+ * allocation or vector Rx preconditions, the flag will be reset.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to setup flow director.");
+ return -ENOTSUP;
+ }
+ ret = i40e_fdir_configure(dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to configure fdir.");
+ goto err;
+ }
+ } else
+ i40e_fdir_teardown(pf);
+
+ ret = i40e_dev_init_vlan(dev);
+ if (ret < 0)
+ goto err;
+
+ /* VMDQ setup.
+ * VMDQ setting needs to be moved out of i40e_pf_config_mq_rx(),
+ * as VMDQ and RSS settings have different requirements.
+ * The general PMD call sequence is NIC init, configure,
+ * rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
+ * the VSI a specific queue belongs to when VMDQ is applicable, so
+ * VMDQ setup has to be done before rx/tx_queue_setup(); this
+ * function is a good place for vmdq_setup.
+ * RSS setup needs the actual number of configured RX queues, which
+ * is only available after rx_queue_setup(), so dev_start() is a
+ * good place for it.
+ */
+ if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ ret = i40e_vmdq_setup(dev);
+ if (ret)
+ goto err;
+ }
+
+ if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ ret = i40e_dcb_setup(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to configure DCB.");
+ goto err_dcb;
+ }
+ }
+
+ return 0;
+
+err_dcb:
+ /* need to release vmdq resource if exists */
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_release(pf->vmdq[i].vsi);
+ pf->vmdq[i].vsi = NULL;
+ }
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
+err:
+ /* need to release fdir resource if exists */
+ i40e_fdir_teardown(pf);
+ return ret;
+}
+
+void
+i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t i;
+
+ for (i = 0; i < vsi->nb_qps; i++) {
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
+ rte_wmb();
+ }
+
+ if (vsi->type != I40E_VSI_SRIOV) {
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+ 0);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1), 0);
+ }
+ } else {
+ uint32_t reg;
+ reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+ vsi->user_param + (msix_vect - 1);
+
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ }
+ I40E_WRITE_FLUSH(hw);
+}
+
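+/* Chain the given range of Rx queues into a linked list via the NEXTQ
+ * field of QINT_RQCTL and point the matching (PF or VF) LNKLST register
+ * at the first queue, so all of them fire the given MSI-X vector.
+ */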
+static void
+__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
+ int base_queue, int nb_queue)
+{
+ int i;
+ uint32_t val;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+
+ /* Bind all RX queues to allocated MSIX interrupt */
+ for (i = 0; i < nb_queue; i++) {
+ val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ I40E_QINT_RQCTL_ITR_INDX_MASK |
+ ((base_queue + i + 1) <<
+ I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+ if (i == nb_queue - 1)
+ val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
+ }
+
+ /* Write first RX queue to Link list register as the head element */
+ if (vsi->type != I40E_VSI_SRIOV) {
+ uint16_t interval =
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+
+ if (msix_vect == I40E_MISC_VEC_ID) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ (base_queue <<
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+ interval);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ (base_queue <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1),
+ interval);
+ }
+ } else {
+ uint32_t reg;
+
+ if (msix_vect == I40E_MISC_VEC_ID) {
+ I40E_WRITE_REG(hw,
+ I40E_VPINT_LNKLST0(vsi->user_param),
+ (base_queue <<
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+ } else {
+ /* num_msix_vectors_vf includes irq0, so subtract it */
+ reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+ vsi->user_param + (msix_vect - 1);
+
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+ (base_queue <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ }
+ }
+
+ I40E_WRITE_FLUSH(hw);
+}
+
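+/* Bind the VSI's queues to MSI-X vectors: one vector per queue while
+ * vectors remain, then map all leftover queues onto the last vector
+ * (or onto the shared misc vector when no other vectors are allowed).
+ */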
+void
+i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+ uint16_t queue_idx = 0;
+ int record = 0;
+ uint32_t val;
+ int i;
+
+ for (i = 0; i < vsi->nb_qps; i++) {
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
+ }
+
+ /* Disable interrupt auto-masking so the INTENA flag is not auto-cleared */
+ val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
+
+ /* VF bind interrupt */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue, vsi->nb_qps);
+ return;
+ }
+
+ /* PF & VMDq bind interrupt */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (vsi->type == I40E_VSI_MAIN) {
+ queue_idx = 0;
+ record = 1;
+ } else if (vsi->type == I40E_VSI_VMDQ2) {
+ struct i40e_vsi *main_vsi =
+ I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
+ queue_idx = vsi->base_queue - main_vsi->nb_qps;
+ record = 1;
+ }
+ }
+
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ if (nb_msix <= 1) {
+ if (!rte_intr_allow_others(intr_handle))
+ /* allow to share MISC_VEC_ID */
+ msix_vect = I40E_MISC_VEC_ID;
+
+ /* not enough MSI-X vectors; map all remaining queues to one */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i,
+ vsi->nb_used_qps - i);
+ for (; !!record && i < vsi->nb_used_qps; i++)
+ intr_handle->intr_vec[queue_idx + i] =
+ msix_vect;
+ break;
+ }
+ /* 1:1 queue/msix_vect mapping */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i, 1);
+ if (!!record)
+ intr_handle->intr_vec[queue_idx + i] = msix_vect;
+
+ msix_vect++;
+ nb_msix--;
+ }
+}
+
+static void
+i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t interval = i40e_calc_itr_interval(\
+ RTE_LIBRTE_I40E_ITR_INTERVAL);
+ uint16_t msix_intr, i;
+
+ if (rte_intr_allow_others(intr_handle))
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ }
+ else
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_intr, i;
+
+ if (rte_intr_allow_others(intr_handle))
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+ 0);
+ }
+ else
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+
+ I40E_WRITE_FLUSH(hw);
+}
+
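+/* Translate the rte_eth link_speeds bitmap into i40e AQ link speed bits,
+ * e.g. ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G becomes
+ * I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB.
+ */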
+static inline uint8_t
+i40e_parse_link_speeds(uint16_t link_speeds)
+{
+ uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
+
+ if (link_speeds & ETH_LINK_SPEED_40G)
+ link_speed |= I40E_LINK_SPEED_40GB;
+ if (link_speeds & ETH_LINK_SPEED_20G)
+ link_speed |= I40E_LINK_SPEED_20GB;
+ if (link_speeds & ETH_LINK_SPEED_10G)
+ link_speed |= I40E_LINK_SPEED_10GB;
+ if (link_speeds & ETH_LINK_SPEED_1G)
+ link_speed |= I40E_LINK_SPEED_1GB;
+ if (link_speeds & ETH_LINK_SPEED_100M)
+ link_speed |= I40E_LINK_SPEED_100MB;
+
+ return link_speed;
+}
+
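+/* Reprogram the PHY: keep the pause and low-power bits reported by
+ * get_phy_capabilities, advertise all speeds when autoneg is enabled,
+ * otherwise force the requested speed.
+ */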
+static int
+i40e_phy_conf_link(struct i40e_hw *hw,
+ uint8_t abilities,
+ uint8_t force_speed)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp phy_ab;
+ struct i40e_aq_set_phy_config phy_conf;
+ const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
+ I40E_AQ_PHY_FLAG_PAUSE_RX |
+ I40E_AQ_PHY_FLAG_LOW_POWER;
+ const uint8_t advt = I40E_LINK_SPEED_40GB |
+ I40E_LINK_SPEED_10GB |
+ I40E_LINK_SPEED_1GB |
+ I40E_LINK_SPEED_100MB;
+ int ret = -ENOTSUP;
+
+
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
+ NULL);
+ if (status)
+ return ret;
+
+ memset(&phy_conf, 0, sizeof(phy_conf));
+
+ /* bits 0-2 use the values from get_phy_abilities_resp */
+ abilities &= ~mask;
+ abilities |= phy_ab.abilities & mask;
+
+ /* update abilities and speed */
+ if (abilities & I40E_AQ_PHY_AN_ENABLED)
+ phy_conf.link_speed = advt;
+ else
+ phy_conf.link_speed = force_speed;
+
+ phy_conf.abilities = abilities;
+
+ /* use get_phy_abilities_resp value for the rest */
+ phy_conf.phy_type = phy_ab.phy_type;
+ phy_conf.eee_capability = phy_ab.eee_capability;
+ phy_conf.eeer = phy_ab.eeer_val;
+ phy_conf.low_power_ctrl = phy_ab.d3_lpan;
+
+ PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
+ phy_ab.abilities, phy_ab.link_speed);
+ PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
+ phy_conf.abilities, phy_conf.link_speed);
+
+ status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
+ if (status)
+ return ret;
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_apply_link_speed(struct rte_eth_dev *dev)
+{
+ uint8_t speed;
+ uint8_t abilities = 0;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+
+ speed = i40e_parse_link_speeds(conf->link_speeds);
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
+ abilities |= I40E_AQ_PHY_AN_ENABLED;
+ abilities |= I40E_AQ_PHY_LINK_ENABLED;
+
+ /* Skip changing speed on 40G interfaces; FW does not support it */
+ if (i40e_is_40G_device(hw->device_id)) {
+ speed = I40E_LINK_SPEED_UNKNOWN;
+ abilities |= I40E_AQ_PHY_AN_ENABLED;
+ }
+
+ return i40e_phy_conf_link(hw, abilities, speed);
+}
+
+static int
+i40e_dev_start(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ int ret, i;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+
+ hw->adapter_stopped = 0;
+
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+ PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
+ rte_intr_disable(intr_handle);
+
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int),
+ 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* Initialize VSI */
+ ret = i40e_dev_rxtx_init(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
+ goto err_up;
+ }
+
+ /* Map queues with MSIX interrupt */
+ main_vsi->nb_used_qps = dev->data->nb_rx_queues -
+ pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ i40e_vsi_queues_bind_intr(main_vsi);
+ i40e_vsi_enable_queues_intr(main_vsi);
+
+ /* Map VMDQ VSI queues with MSIX interrupt */
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
+ i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
+ }
+
+ /* enable FDIR MSIX interrupt */
+ if (pf->fdir.fdir_vsi) {
+ i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ }
+
+ /* Enable all queues which have been configured */
+ ret = i40e_dev_switch_queues(pf, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to enable VSI");
+ goto err_up;
+ }
+
+ /* Enable receiving broadcast packets */
+ ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
+ true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+ }
+
+ /* Apply link configure */
+ if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_20G | ETH_LINK_SPEED_40G)) {
+ PMD_DRV_LOG(ERR, "Invalid link setting");
+ goto err_up;
+ }
+ ret = i40e_apply_link_speed(dev);
+ if (I40E_SUCCESS != ret) {
+ PMD_DRV_LOG(ERR, "Fail to apply link setting");
+ goto err_up;
+ }
+
+ if (!rte_intr_allow_others(intr_handle)) {
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler,
+ (void *)dev);
+ /* configure and enable device interrupt */
+ i40e_pf_config_irq0(hw, FALSE);
+ i40e_pf_enable_irq0(hw);
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex\n");
+ }
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(intr_handle);
+
+ return I40E_SUCCESS;
+
+err_up:
+ i40e_dev_switch_queues(pf, FALSE);
+ i40e_dev_clear_queues(dev);
+
+ return ret;
+}
+
+static void
+i40e_dev_stop(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_mirror_rule *p_mirror;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ int i;
+
+ /* Disable all queues */
+ i40e_dev_switch_queues(pf, FALSE);
+
+ /* un-map queues with interrupt registers */
+ i40e_vsi_disable_queues_intr(main_vsi);
+ i40e_vsi_queues_unbind_intr(main_vsi);
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
+ i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
+ }
+
+ if (pf->fdir.fdir_vsi) {
+ i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
+ }
+ /* Clear all queues and release memory */
+ i40e_dev_clear_queues(dev);
+
+ /* Set link down */
+ i40e_dev_set_link_down(dev);
+
+ /* Remove all mirror rules */
+ while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
+ TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
+ rte_free(p_mirror);
+ }
+ pf->nb_mirror_rule = 0;
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void
+i40e_dev_close(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ i40e_dev_stop(dev);
+ hw->adapter_stopped = 1;
+ i40e_dev_free_queues(dev);
+
+ /* Disable interrupt */
+ i40e_pf_disable_irq0(hw);
+ rte_intr_disable(&(dev->pci_dev->intr_handle));
+
+ /* shutdown and destroy the HMC */
+ i40e_shutdown_lan_hmc(hw);
+
+ /* release all the existing VSIs and VEBs */
+ i40e_fdir_teardown(pf);
+ i40e_vsi_release(pf->main_vsi);
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_release(pf->vmdq[i].vsi);
+ pf->vmdq[i].vsi = NULL;
+ }
+
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
+
+ /* shutdown the adminq */
+ i40e_aq_queue_shutdown(hw, true);
+ i40e_shutdown_adminq(hw);
+
+ i40e_res_pool_destroy(&pf->qp_pool);
+ i40e_res_pool_destroy(&pf->msix_pool);
+
+ /* force a PF reset to clean anything leftover */
+ reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
+ I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int status;
+
+ status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ true, NULL, true);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ TRUE, NULL);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
+}
+
+static void
+i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int status;
+
+ status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ false, NULL, true);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ false, NULL);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
+}
+
+static void
+i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
+}
+
+static void
+i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, FALSE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
+}
+
+/*
+ * Set device link up.
+ */
+static int
+i40e_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ /* re-apply link speed setting */
+ return i40e_apply_link_speed(dev);
+}
+
+/*
+ * Set device link down.
+ */
+static int
+i40e_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
+ uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return i40e_phy_conf_link(hw, abilities, speed);
+}
+
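+/* Poll link status via the admin queue; when wait_to_complete is set,
+ * retry every CHECK_INTERVAL ms up to MAX_REPEAT_TIME times before
+ * reporting link down. Returns 0 when the status changed, -1 otherwise.
+ */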
+int
+i40e_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_link_status link_status;
+ struct rte_eth_link link, old;
+ int status;
+ unsigned rep_cnt = MAX_REPEAT_TIME;
+
+ memset(&link, 0, sizeof(link));
+ memset(&old, 0, sizeof(old));
+ memset(&link_status, 0, sizeof(link_status));
+ rte_i40e_dev_atomic_read_link_status(dev, &old);
+
+ do {
+ /* Get link status information from hardware */
+ status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
+ if (status != I40E_SUCCESS) {
+ link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_DRV_LOG(ERR, "Failed to get link info");
+ goto out;
+ }
+
+ link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
+ if (!wait_to_complete)
+ break;
+
+ rte_delay_ms(CHECK_INTERVAL);
+ } while (!link.link_status && rep_cnt--);
+
+ if (!link.link_status)
+ goto out;
+
+ /* i40e uses full duplex only */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ /* Parse the link status */
+ switch (link_status.link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ default:
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ }
+
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+
+out:
+ rte_i40e_dev_atomic_write_link_status(dev, &link);
+ if (link.link_status == old.link_status)
+ return -1;
+
+ return 0;
+}
+
+/* Get all the statistics of a VSI */
+void
+i40e_update_vsi_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
+ struct i40e_eth_stats *nes = &vsi->eth_stats;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
+
+ i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
+ vsi->offset_loaded, &oes->rx_bytes,
+ &nes->rx_bytes);
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
+ vsi->offset_loaded, &oes->rx_unicast,
+ &nes->rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
+ vsi->offset_loaded, &oes->rx_multicast,
+ &nes->rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
+ vsi->offset_loaded, &oes->rx_broadcast,
+ &nes->rx_broadcast);
+ i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
+ &oes->rx_discards, &nes->rx_discards);
+ /* GLV_REPC not supported */
+ /* GLV_RMPC not supported */
+ i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
+ &oes->rx_unknown_protocol,
+ &nes->rx_unknown_protocol);
+ i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
+ vsi->offset_loaded, &oes->tx_bytes,
+ &nes->tx_bytes);
+ i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
+ vsi->offset_loaded, &oes->tx_unicast,
+ &nes->tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
+ vsi->offset_loaded, &oes->tx_multicast,
+ &nes->tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
+ vsi->offset_loaded, &oes->tx_broadcast,
+ &nes->tx_broadcast);
+ /* GLV_TDPC not supported */
+ i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
+ &oes->tx_errors, &nes->tx_errors);
+ vsi->offset_loaded = true;
+
+ PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
+ vsi->vsi_id);
+ PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
+ PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
+ PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
+ PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
+ PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
+ PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
+ nes->rx_unknown_protocol);
+ PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
+ PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
+ PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
+ PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
+ PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
+ PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
+ PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
+ vsi->vsi_id);
+}
+
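+/* Port counters live in 48-bit (high/low register pair) or 32-bit hardware
+ * registers that the driver never clears; i40e_stat_update_48/_32 instead
+ * compute deltas against stored snapshots (the offset_loaded scheme).
+ */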
+static void
+i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
+{
+ unsigned int i;
+ struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
+ struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
+
+ /* Get statistics of struct i40e_eth_stats */
+ i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_bytes,
+ &ns->eth.rx_bytes);
+ i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_unicast,
+ &ns->eth.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_multicast,
+ &ns->eth.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_broadcast,
+ &ns->eth.rx_broadcast);
+ /* Workaround: CRC size should not be included in byte statistics,
+ * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
+ */
+ ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+
+ i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->offset_loaded, &os->eth.rx_discards,
+ &ns->eth.rx_discards);
+ /* GLPRT_REPC not supported */
+ /* GLPRT_RMPC not supported */
+ i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
+ pf->offset_loaded,
+ &os->eth.rx_unknown_protocol,
+ &ns->eth.rx_unknown_protocol);
+ i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_bytes,
+ &ns->eth.tx_bytes);
+ i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_unicast,
+ &ns->eth.tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_multicast,
+ &ns->eth.tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_broadcast,
+ &ns->eth.tx_broadcast);
+ ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+ /* GLPRT_TEPC not supported */
+
+ /* additional port specific stats */
+ i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->offset_loaded, &os->tx_dropped_link_down,
+ &ns->tx_dropped_link_down);
+ i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->offset_loaded, &os->crc_errors,
+ &ns->crc_errors);
+ i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->offset_loaded, &os->illegal_bytes,
+ &ns->illegal_bytes);
+ /* GLPRT_ERRBC not supported */
+ i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->offset_loaded, &os->mac_local_faults,
+ &ns->mac_local_faults);
+ i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->offset_loaded, &os->mac_remote_faults,
+ &ns->mac_remote_faults);
+ i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->offset_loaded, &os->rx_length_errors,
+ &ns->rx_length_errors);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->offset_loaded, &os->link_xon_rx,
+ &ns->link_xon_rx);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->offset_loaded, &os->link_xoff_rx,
+ &ns->link_xoff_rx);
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_rx[i],
+ &ns->priority_xon_rx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xoff_rx[i],
+ &ns->priority_xoff_rx[i]);
+ }
+ i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->offset_loaded, &os->link_xon_tx,
+ &ns->link_xon_tx);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->offset_loaded, &os->link_xoff_tx,
+ &ns->link_xoff_tx);
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_tx[i],
+ &ns->priority_xon_tx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xoff_tx[i],
+ &ns->priority_xoff_tx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_2_xoff[i],
+ &ns->priority_xon_2_xoff[i]);
+ }
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->offset_loaded, &os->rx_size_64,
+ &ns->rx_size_64);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->offset_loaded, &os->rx_size_127,
+ &ns->rx_size_127);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->offset_loaded, &os->rx_size_255,
+ &ns->rx_size_255);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->offset_loaded, &os->rx_size_511,
+ &ns->rx_size_511);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->offset_loaded, &os->rx_size_1023,
+ &ns->rx_size_1023);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->offset_loaded, &os->rx_size_1522,
+ &ns->rx_size_1522);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->offset_loaded, &os->rx_size_big,
+ &ns->rx_size_big);
+ i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->offset_loaded, &os->rx_undersize,
+ &ns->rx_undersize);
+ i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->offset_loaded, &os->rx_fragments,
+ &ns->rx_fragments);
+ i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->offset_loaded, &os->rx_oversize,
+ &ns->rx_oversize);
+ i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->offset_loaded, &os->rx_jabber,
+ &ns->rx_jabber);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->offset_loaded, &os->tx_size_64,
+ &ns->tx_size_64);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->offset_loaded, &os->tx_size_127,
+ &ns->tx_size_127);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->offset_loaded, &os->tx_size_255,
+ &ns->tx_size_255);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->offset_loaded, &os->tx_size_511,
+ &ns->tx_size_511);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->offset_loaded, &os->tx_size_1023,
+ &ns->tx_size_1023);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->offset_loaded, &os->tx_size_1522,
+ &ns->tx_size_1522);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->offset_loaded, &os->tx_size_big,
+ &ns->tx_size_big);
+ i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
+ pf->offset_loaded,
+ &os->fd_sb_match, &ns->fd_sb_match);
+ /* GLPRT_MSPDC not supported */
+ /* GLPRT_XEC not supported */
+
+ pf->offset_loaded = true;
+
+ if (pf->main_vsi)
+ i40e_update_vsi_stats(pf->main_vsi);
+}
+
+/* Get all statistics of a port */
+static void
+i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
+ unsigned i;
+
+ /* call read registers - updates values, now write them to struct */
+ i40e_read_stats_registers(pf, hw);
+
+ stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
+ pf->main_vsi->eth_stats.rx_multicast +
+ pf->main_vsi->eth_stats.rx_broadcast -
+ pf->main_vsi->eth_stats.rx_discards;
+ stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
+ pf->main_vsi->eth_stats.tx_multicast +
+ pf->main_vsi->eth_stats.tx_broadcast;
+ stats->ibytes = ns->eth.rx_bytes;
+ stats->obytes = ns->eth.tx_bytes;
+ stats->oerrors = ns->eth.tx_errors +
+ pf->main_vsi->eth_stats.tx_errors;
+
+ /* Rx Errors */
+ stats->imissed = ns->eth.rx_discards +
+ pf->main_vsi->eth_stats.rx_discards;
+ stats->ierrors = ns->crc_errors +
+ ns->rx_length_errors + ns->rx_undersize +
+ ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
+
+ PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
+ PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
+ PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
+ PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
+ PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
+ PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
+ PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
+ ns->eth.rx_unknown_protocol);
+ PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
+ PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
+ PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
+ PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
+ PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
+ PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
+
+ PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
+ ns->tx_dropped_link_down);
+ PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
+ PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
+ ns->illegal_bytes);
+ PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
+ PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
+ ns->mac_local_faults);
+ PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
+ ns->mac_remote_faults);
+ PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"",
+ ns->rx_length_errors);
+ PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
+ PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
+ for (i = 0; i < 8; i++) {
+ PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"",
+ i, ns->priority_xon_rx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"",
+ i, ns->priority_xoff_rx[i]);
+ }
+ PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
+ PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
+ for (i = 0; i < 8; i++) {
+ PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"",
+ i, ns->priority_xon_tx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"",
+ i, ns->priority_xoff_tx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"",
+ i, ns->priority_xon_2_xoff[i]);
+ }
+ PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
+ PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
+ PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
+ PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
+ PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
+ PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
+ PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
+ PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
+ PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
+ PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
+ PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
+ PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
+ PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
+ PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
+ PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
+ PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
+ PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
+ PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
+ PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
+ ns->mac_short_packet_dropped);
+ PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"",
+ ns->checksum_error);
+ PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
+ PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
+}
+
+/* Reset the statistics */
+static void
+i40e_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Mark PF and VSI stats to update the offset, aka "reset" */
+ pf->offset_loaded = false;
+ if (pf->main_vsi)
+ pf->main_vsi->offset_loaded = false;
+
+ /* read the stats, reading current register values into offset */
+ i40e_read_stats_registers(pf, hw);
+}
+
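+/* Total xstats count: basic eth stats, per-port stats, plus one Rx and
+ * one Tx priority stat per traffic class (8 TCs each).
+ */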
+static uint32_t
+i40e_xstats_calc_num(void)
+{
+ return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
+ (I40E_NB_RXQ_PRIO_XSTATS * 8) +
+ (I40E_NB_TXQ_PRIO_XSTATS * 8);
+}
+
+static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned count = 0;
+ unsigned i, prio;
+
+ if (xstats_names == NULL)
+ return i40e_xstats_calc_num();
+
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ /* Get stats from i40e_eth_stats struct */
+ for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", rte_i40e_stats_strings[i].name);
+ count++;
+ }
+
+ /* Get individual stats from i40e_hw_port struct */
+ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", rte_i40e_hw_port_strings[i].name);
+ count++;
+ }
+
+ for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", prio,
+ rte_i40e_rxq_prio_strings[i].name);
+ count++;
+ }
+ }
+
+ for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", prio,
+ rte_i40e_txq_prio_strings[i].name);
+ count++;
+ }
+ }
+ return count;
+}
+
+static int
+i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ unsigned i, count, prio;
+ struct i40e_hw_port_stats *hw_stats = &pf->stats;
+
+ count = i40e_xstats_calc_num();
+ if (n < count)
+ return count;
+
+ i40e_read_stats_registers(pf, hw);
+
+ if (xstats == NULL)
+ return 0;
+
+ count = 0;
+
+ /* Get stats from i40e_eth_stats struct */
+ for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
+ rte_i40e_stats_strings[i].offset);
+ count++;
+ }
+
+ /* Get individual stats from i40e_hw_port struct */
+ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_hw_port_strings[i].offset);
+ count++;
+ }
+
+ for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_rxq_prio_strings[i].offset +
+ (sizeof(uint64_t) * prio));
+ count++;
+ }
+ }
+
+ for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_txq_prio_strings[i].offset +
+ (sizeof(uint64_t) * prio));
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ __rte_unused uint8_t stat_idx,
+ __rte_unused uint8_t is_rx)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOSYS;
+}
+
+static void
+i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+
+ dev_info->max_rx_queues = vsi->nb_qps;
+ dev_info->max_tx_queues = vsi->nb_qps;
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->max_mac_addrs = vsi->max_macaddrs;
+ dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ dev_info->reta_size = pf->hash_lut_size;
+ dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ if (pf->flags & I40E_FLAG_VMDQ) {
+ dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
+ dev_info->vmdq_queue_base = dev_info->max_rx_queues;
+ dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
+ pf->max_nb_vmdq_vsi;
+ dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
+ dev_info->max_rx_queues += dev_info->vmdq_queue_num;
+ dev_info->max_tx_queues += dev_info->vmdq_queue_num;
+ }
+
+ if (i40e_is_40G_device(hw->device_id))
+ /* For XL710 */
+ dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G;
+ else
+ /* For X710 */
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+}
+
+static int
+i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ PMD_INIT_FUNC_TRACE();
+
+ if (on)
+ return i40e_vsi_add_vlan(vsi, vlan_id);
+ else
+ return i40e_vsi_delete_vlan(vsi, vlan_id);
+}
+
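+/* Program the TPID into GL_SWT_L2TAGCTRL: index 2 holds the outer tag and
+ * index 3 the inner tag in QinQ mode, while index 3 holds the single tag
+ * otherwise. The register is accessed through the debug read/write AQ
+ * commands.
+ */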
+static int
+i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t reg_r = 0, reg_w = 0;
+ uint16_t reg_id = 0;
+ int ret = 0;
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+
+ switch (vlan_type) {
+ case ETH_VLAN_TYPE_OUTER:
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
+ break;
+ case ETH_VLAN_TYPE_INNER:
+ if (qinq)
+ reg_id = 3;
+ else {
+ ret = -EINVAL;
+ PMD_DRV_LOG(ERR,
+ "Unsupported vlan type in single vlan.\n");
+ return ret;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
+ return ret;
+ }
+ ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Fail to debug read from "
+ "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ ret = -EIO;
+ return ret;
+ }
+ PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
+ "0x%08"PRIx64"", reg_id, reg_r);
+
+ reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
+ reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
+ if (reg_r == reg_w) {
+ ret = 0;
+ PMD_DRV_LOG(DEBUG, "No need to write");
+ return ret;
+ }
+
+ ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ reg_w, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -EIO;
+ PMD_DRV_LOG(ERR, "Fail to debug write to "
+ "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ return ret;
+ }
+ PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
+ "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+
+ return ret;
+}
+
+static void
+i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ i40e_vsi_config_vlan_filter(vsi, TRUE);
+ else
+ i40e_vsi_config_vlan_filter(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ i40e_vsi_config_vlan_stripping(vsi, TRUE);
+ else
+ i40e_vsi_config_vlan_stripping(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ i40e_vsi_config_double_vlan(vsi, TRUE);
+ /* Set global registers with default ether type value */
+ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ETHER_TYPE_VLAN);
+ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ ETHER_TYPE_VLAN);
+		} else
+			i40e_vsi_config_double_vlan(vsi, FALSE);
+ }
+}
+
+static void
+i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue,
+ __rte_unused int on)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
+ struct i40e_vsi_vlan_pvid_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.on = on;
+ if (info.on)
+ info.config.pvid = pvid;
+ else {
+ info.config.reject.tagged =
+ data->dev_conf.txmode.hw_vlan_reject_tagged;
+ info.config.reject.untagged =
+ data->dev_conf.txmode.hw_vlan_reject_untagged;
+ }
+
+ return i40e_vsi_vlan_pvid_set(vsi, &info);
+}
+
+static int
+i40e_dev_led_on(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mode = i40e_led_get(hw);
+
+ if (mode == 0)
+		i40e_led_set(hw, 0xf, true); /* 0xf means turn on all LEDs */
+
+ return 0;
+}
+
+static int
+i40e_dev_led_off(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mode = i40e_led_get(hw);
+
+ if (mode != 0)
+ i40e_led_set(hw, 0, false);
+
+ return 0;
+}
+
+static int
+i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ fc_conf->pause_time = pf->fc_conf.pause_time;
+ fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
+ fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
+
+	/* Return current mode according to actual setting */
+ switch (hw->fc.current_mode) {
+ case I40E_FC_FULL:
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ case I40E_FC_TX_PAUSE:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case I40E_FC_RX_PAUSE:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case I40E_FC_NONE:
+ default:
+ fc_conf->mode = RTE_FC_NONE;
+	}
+
+ return 0;
+}
+
+static int
+i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ uint32_t mflcn_reg, fctrl_reg, reg;
+ uint32_t max_high_water;
+ uint8_t i, aq_failure;
+ int err;
+ struct i40e_hw *hw;
+ struct i40e_pf *pf;
+ enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
+ [RTE_FC_NONE] = I40E_FC_NONE,
+ [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+ [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+ [RTE_FC_FULL] = I40E_FC_FULL
+ };
+
+	/* The high_water field in rte_eth_fc_conf is in kilobyte units */
+
+ max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
+			"high_water must be <= %d.", max_high_water);
+ return -EINVAL;
+ }
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
+
+ pf->fc_conf.pause_time = fc_conf->pause_time;
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* All the link flow control related enable/disable register
+	 * configuration is handled by the F/W
+ */
+ err = i40e_set_fc(hw, &aq_failure, true);
+ if (err < 0)
+ return -ENOSYS;
+
+ if (i40e_is_40G_device(hw->device_id)) {
+ /* Configure flow control refresh threshold,
+ * the value for stat_tx_pause_refresh_timer[8]
+ * is used for global pause operation.
+ */
+
+ I40E_WRITE_REG(hw,
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
+ pf->fc_conf.pause_time);
+
+ /* configure the timer value included in transmitted pause
+ * frame,
+ * the value for stat_tx_pause_quanta[8] is used for global
+ * pause operation
+ */
+ I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
+ pf->fc_conf.pause_time);
+
+ fctrl_reg = I40E_READ_REG(hw,
+ I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
+
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
+ else
+ fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
+
+ I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
+ fctrl_reg);
+ } else {
+ /* Configure pause time (2 TCs per register) */
+ reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
+ pf->fc_conf.pause_time / 2);
+
+ mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
+
+ /* set or clear MFLCN.PMCF & MFLCN.DPF bits
+		 * depending on configuration
+ */
+ if (fc_conf->mac_ctrl_frame_fwd != 0) {
+ mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
+ mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
+ } else {
+ mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
+ mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
+ }
+
+ I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
+ }
+
+	/* Configure the watermarks based on both packets and bytes */
+ I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
+ (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
+ (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
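+
+/* Worked example (illustrative, assuming I40E_KILOSHIFT == 10 and an average
+ * packet size of 128 bytes for I40E_PACKET_AVERAGE_SIZE): a high_water of
+ * 3 KB programs
+ *
+ *   GHW = 3 << 10         = 3072 bytes
+ *   PHW = (3 << 10) / 128 = 24 packets
+ *
+ * so the byte-based and packet-based watermarks describe the same fill level.
+ */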
+
+static int
+i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_eth_pfc_conf *pfc_conf)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOSYS;
+}
+
+/* Add a MAC address, and update filters */
+static void
+i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ __rte_unused uint32_t index,
+ uint32_t pool)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_mac_filter_info mac_filter;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ /* If VMDQ not enabled or configured, return */
+ if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
+ !pf->nb_cfg_vmdq_vsi)) {
+ PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
+ pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
+ pool);
+ return;
+ }
+
+ if (pool > pf->nb_cfg_vmdq_vsi) {
+ PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
+ pool, pf->nb_cfg_vmdq_vsi);
+ return;
+ }
+
+ (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ else
+ mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
+
+ if (pool == 0)
+ vsi = pf->main_vsi;
+ else
+ vsi = pf->vmdq[pool - 1].vsi;
+
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
+ return;
+ }
+}
+
+/* Remove a MAC address, and update filters */
+static void
+i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *macaddr;
+ int ret;
+ uint32_t i;
+ uint64_t pool_sel;
+
+ macaddr = &(data->mac_addrs[index]);
+
+ pool_sel = dev->data->mac_pool_sel[index];
+
+ for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
+ if (pool_sel & (1ULL << i)) {
+ if (i == 0)
+ vsi = pf->main_vsi;
+ else {
+ /* No VMDQ pool enabled or configured */
+ if (!(pf->flags & I40E_FLAG_VMDQ) ||
+ (i > pf->nb_cfg_vmdq_vsi)) {
+ PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
+ "/configured");
+ return;
+ }
+ vsi = pf->vmdq[i - 1].vsi;
+ }
+ ret = i40e_vsi_delete_mac(vsi, macaddr);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
+ return;
+ }
+ }
+ }
+}
+
+/* Set perfect match or hash match of MAC and VLAN for a VF */
+static int
+i40e_vf_mac_filter_set(struct i40e_pf *pf,
+ struct rte_eth_mac_filter *filter,
+ bool add)
+{
+ struct i40e_hw *hw;
+ struct i40e_mac_filter_info mac_filter;
+ struct ether_addr old_mac;
+ struct ether_addr *new_mac;
+ struct i40e_pf_vf *vf = NULL;
+ uint16_t vf_id;
+ int ret;
+
+ if (pf == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid PF argument.");
+ return -EINVAL;
+ }
+ hw = I40E_PF_TO_HW(pf);
+
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
+ return -EINVAL;
+ }
+
+ new_mac = &filter->mac_addr;
+
+ if (is_zero_ether_addr(new_mac)) {
+ PMD_DRV_LOG(ERR, "Invalid ethernet address.");
+ return -EINVAL;
+ }
+
+ vf_id = filter->dst_id;
+
+ if (vf_id > pf->vf_num - 1 || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+ vf = &pf->vfs[vf_id];
+
+ if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
+ PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
+ return -EINVAL;
+ }
+
+ if (add) {
+ (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
+ (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
+ ETHER_ADDR_LEN);
+ (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
+ ETHER_ADDR_LEN);
+
+ mac_filter.filter_type = filter->filter_type;
+ ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
+ return -1;
+ }
+ ether_addr_copy(new_mac, &pf->dev_addr);
+ } else {
+ (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
+ ETHER_ADDR_LEN);
+ ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
+ return -1;
+ }
+
+ /* Clear device address as it has been removed */
+ if (is_same_ether_addr(&(pf->dev_addr), new_mac))
+ memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
+ }
+
+ return 0;
+}
+
+/* MAC filter handle */
+static int
+i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_mac_filter *filter;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ int ret = I40E_NOT_SUPPORTED;
+
+ filter = (struct rte_eth_mac_filter *)(arg);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ ret = I40E_SUCCESS;
+ break;
+ case RTE_ETH_FILTER_ADD:
+ i40e_pf_disable_irq0(hw);
+ if (filter->is_vf)
+ ret = i40e_vf_mac_filter_set(pf, filter, 1);
+ i40e_pf_enable_irq0(hw);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ i40e_pf_disable_irq0(hw);
+ if (filter->is_vf)
+ ret = i40e_vf_mac_filter_set(pf, filter, 0);
+ i40e_pf_enable_irq0(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = I40E_ERR_PARAM;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!lut)
+ return -EINVAL;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
+ }
+
+ return 0;
+}
+
+static int
+i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi || !lut)
+ return -EINVAL;
+
+ pf = I40E_VSI_TO_PF(vsi);
+ hw = I40E_VSI_TO_HW(vsi);
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ I40E_WRITE_FLUSH(hw);
+ }
+
+ return 0;
+}
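+
+/* Illustrative note: in the register fallback path above, four 8-bit LUT
+ * entries are packed into each 32-bit HLUT register through the pointer
+ * cast. On a little-endian host, lut[0..3] = {0, 1, 2, 3} yields
+ *
+ *   lut_dw[0] == 0x03020100;  /- written to I40E_PFQF_HLUT(0) -/
+ */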
+
+static int
+i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint16_t i, lut_size = pf->hash_lut_size;
+ uint16_t idx, shift;
+ uint8_t *lut;
+ int ret;
+
+ if (reta_size != lut_size ||
+ reta_size > ETH_RSS_RETA_SIZE_512) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+			"(%d) doesn't match the size the hardware can support "
+			"(%d)\n", reta_size, lut_size);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -ENOMEM;
+ }
+ ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
+ }
+ ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
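+
+/* Illustrative note: reta_conf is an array of 64-entry groups, so RETA entry
+ * i lives in group i / RTE_RETA_GROUP_SIZE at slot i % RTE_RETA_GROUP_SIZE.
+ * With RTE_RETA_GROUP_SIZE == 64, entry 130 maps to reta_conf[2].reta[2] and
+ * is copied into the LUT only if bit 2 of reta_conf[2].mask is set.
+ */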
+
+static int
+i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint16_t i, lut_size = pf->hash_lut_size;
+ uint16_t idx, shift;
+ uint8_t *lut;
+ int ret;
+
+ if (reta_size != lut_size ||
+ reta_size > ETH_RSS_RETA_SIZE_512) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+			"(%d) doesn't match the size the hardware can support "
+			"(%d)\n", reta_size, lut_size);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -ENOMEM;
+ }
+
+ ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = lut[i];
+ }
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
+
+/**
+ * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+enum i40e_status_code
+i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
+ alignment, RTE_PGSIZE_2M);
+ if (!mz)
+ return I40E_ERR_NO_MEMORY;
+
+ mem->size = size;
+ mem->va = mz->addr;
+ mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ mem->zone = (const void *)mz;
+ PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
+ "%"PRIu64, mz->name, mem->pa);
+
+ return I40E_SUCCESS;
+}
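+
+/* Illustrative note: each allocation reserves a fresh memzone whose name is
+ * made unique with rte_rand(), and the RTE_PGSIZE_2M boundary argument keeps
+ * the zone from crossing a 2M page boundary; mem->pa is the bus address the
+ * hardware will DMA to.
+ */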
+
+/**
+ * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+enum i40e_status_code
+i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_dma_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
+ "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
+ mem->pa);
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
+ mem->va = NULL;
+ mem->pa = (u64)0;
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to fill out
+ * @size: size of memory requested
+ **/
+enum i40e_status_code
+i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = rte_zmalloc("i40e", size, 0);
+
+ if (mem->va)
+ return I40E_SUCCESS;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to free
+ **/
+enum i40e_status_code
+i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_virt_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ rte_free(mem->va);
+ mem->va = NULL;
+
+ return I40E_SUCCESS;
+}
+
+void
+i40e_init_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_init(&sp->spinlock);
+}
+
+void
+i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_lock(&sp->spinlock);
+}
+
+void
+i40e_release_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_unlock(&sp->spinlock);
+}
+
+void
+i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
+{
+ return;
+}
+
+/**
+ * Get the hardware capabilities, which will be parsed
+ * and saved into struct i40e_hw.
+ */
+static int
+i40e_get_cap(struct i40e_hw *hw)
+{
+ struct i40e_aqc_list_capabilities_element_resp *buf;
+ uint16_t len, size = 0;
+ int ret;
+
+	/* Calculate a buffer size large enough for the temporary response data */
+ len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
+ I40E_MAX_CAP_ELE_NUM;
+ buf = rte_zmalloc("i40e", len, 0);
+ if (!buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+	/* Get and parse the capabilities, then save them to hw */
+ ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
+ i40e_aqc_opc_list_func_capabilities, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to discover capabilities");
+
+ /* Free the temporary buffer after being used */
+ rte_free(buf);
+
+ return ret;
+}
+
+static int
+i40e_pf_parameter_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t qp_count = 0, vsi_count = 0;
+
+ if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+ PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
+ return -EINVAL;
+ }
+ /* Add the parameter init for LFC */
+ pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
+
+ pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
+ pf->max_num_vsi = hw->func_caps.num_vsis;
+ pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
+ pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+ /* FDir queue/VSI allocation */
+ pf->fdir_qp_offset = 0;
+ if (hw->func_caps.fd) {
+ pf->flags |= I40E_FLAG_FDIR;
+ pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
+ } else {
+ pf->fdir_nb_qps = 0;
+ }
+ qp_count += pf->fdir_nb_qps;
+ vsi_count += 1;
+
+ /* LAN queue/VSI allocation */
+ pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
+ if (!hw->func_caps.rss) {
+ pf->lan_nb_qps = 1;
+ } else {
+ pf->flags |= I40E_FLAG_RSS;
+ if (hw->mac.type == I40E_MAC_X722)
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
+ pf->lan_nb_qps = pf->lan_nb_qp_max;
+ }
+ qp_count += pf->lan_nb_qps;
+ vsi_count += 1;
+
+ /* VF queue/VSI allocation */
+ pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
+ if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
+ pf->flags |= I40E_FLAG_SRIOV;
+ pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+ pf->vf_num = dev->pci_dev->max_vfs;
+ PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
+ "in total %u queues", pf->vf_num, pf->vf_nb_qps,
+ pf->vf_nb_qps * pf->vf_num);
+ } else {
+ pf->vf_nb_qps = 0;
+ pf->vf_num = 0;
+ }
+ qp_count += pf->vf_nb_qps * pf->vf_num;
+ vsi_count += pf->vf_num;
+
+ /* VMDq queue/VSI allocation */
+ pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
+ pf->vmdq_nb_qps = 0;
+ pf->max_nb_vmdq_vsi = 0;
+ if (hw->func_caps.vmdq) {
+ if (qp_count < hw->func_caps.num_tx_qp &&
+ vsi_count < hw->func_caps.num_vsis) {
+ pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
+ qp_count) / pf->vmdq_nb_qp_max;
+
+			/* Limit the maximum number of VMDq VSIs to the
+			 * maximum the ethdev layer can support
+ */
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ hw->func_caps.num_vsis - vsi_count);
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ ETH_64_POOLS);
+ if (pf->max_nb_vmdq_vsi) {
+ pf->flags |= I40E_FLAG_VMDQ;
+ pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
+ PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
+ "per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi,
+ pf->vmdq_nb_qps, pf->vmdq_nb_qps *
+ pf->max_nb_vmdq_vsi);
+ } else {
+				PMD_DRV_LOG(INFO, "Not enough queues left for "
+ "VMDq");
+ }
+ } else {
+ PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
+ }
+ }
+ qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
+ vsi_count += pf->max_nb_vmdq_vsi;
+
+ if (hw->func_caps.dcb)
+ pf->flags |= I40E_FLAG_DCB;
+
+ if (qp_count > hw->func_caps.num_tx_qp) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
+ "the hardware maximum %u", qp_count,
+ hw->func_caps.num_tx_qp);
+ return -EINVAL;
+ }
+ if (vsi_count > hw->func_caps.num_vsis) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
+ "the hardware maximum %u", vsi_count,
+ hw->func_caps.num_vsis);
+ return -EINVAL;
+ }
+
+ return 0;
+}
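+
+/* Worked example of the queue budget above (illustrative numbers): with
+ * num_tx_qp == 128, 1 FDIR queue, 64 LAN queues and 4 VFs of 4 queues each,
+ * 128 - 1 - 64 - 16 = 47 queues remain; with vmdq_nb_qp_max == 4 that allows
+ * at most 47 / 4 == 11 VMDq VSIs before the VSI-count caps are applied.
+ */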
+
+static int
+i40e_pf_get_switch_config(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_aqc_get_switch_config_resp *switch_config;
+ struct i40e_aqc_switch_config_element_resp *element;
+ uint16_t start_seid = 0, num_reported;
+ int ret;
+
+	switch_config = (struct i40e_aqc_get_switch_config_resp *)
+			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
+ if (!switch_config) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -ENOMEM;
+ }
+
+ /* Get the switch configurations */
+ ret = i40e_aq_get_switch_config(hw, switch_config,
+ I40E_AQ_LARGE_BUF, &start_seid, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get switch configurations");
+ goto fail;
+ }
+ num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
+ if (num_reported != 1) { /* The number should be 1 */
+ PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
+ goto fail;
+ }
+
+ /* Parse the switch configuration elements */
+ element = &(switch_config->element[0]);
+ if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
+ pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
+ pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
+ } else
+ PMD_DRV_LOG(INFO, "Unknown element type");
+
+fail:
+ rte_free(switch_config);
+
+ return ret;
+}
+
+static int
+i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
+ uint32_t num)
+{
+ struct pool_entry *entry;
+
+ if (pool == NULL || num == 0)
+ return -EINVAL;
+
+ entry = rte_zmalloc("i40e", sizeof(*entry), 0);
+ if (entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
+ return -ENOMEM;
+ }
+
+	/* Initialize the queue heap */
+ pool->num_free = num;
+ pool->num_alloc = 0;
+ pool->base = base;
+ LIST_INIT(&pool->alloc_list);
+ LIST_INIT(&pool->free_list);
+
+ /* Initialize element */
+ entry->base = 0;
+ entry->len = num;
+
+ LIST_INSERT_HEAD(&pool->free_list, entry, next);
+ return 0;
+}
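+
+/* Illustrative sketch: after i40e_res_pool_init(&pool, 32, 8), the pool holds
+ * one free entry covering relative offsets [0, 8), i.e. absolute IDs 32..39:
+ *
+ *   free_list : { base = 0, len = 8 }
+ *   alloc_list: empty, num_free == 8, num_alloc == 0
+ */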
+
+static void
+i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
+{
+ struct pool_entry *entry, *next_entry;
+
+ if (pool == NULL)
+ return;
+
+ for (entry = LIST_FIRST(&pool->alloc_list);
+ entry && (next_entry = LIST_NEXT(entry, next), 1);
+ entry = next_entry) {
+ LIST_REMOVE(entry, next);
+ rte_free(entry);
+ }
+
+ for (entry = LIST_FIRST(&pool->free_list);
+ entry && (next_entry = LIST_NEXT(entry, next), 1);
+ entry = next_entry) {
+ LIST_REMOVE(entry, next);
+ rte_free(entry);
+ }
+
+ pool->num_free = 0;
+ pool->num_alloc = 0;
+ pool->base = 0;
+ LIST_INIT(&pool->alloc_list);
+ LIST_INIT(&pool->free_list);
+}
+
+static int
+i40e_res_pool_free(struct i40e_res_pool_info *pool,
+ uint32_t base)
+{
+ struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
+ uint32_t pool_offset;
+ int insert;
+
+ if (pool == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ pool_offset = base - pool->base;
+ /* Lookup in alloc list */
+ LIST_FOREACH(entry, &pool->alloc_list, next) {
+ if (entry->base == pool_offset) {
+ valid_entry = entry;
+ LIST_REMOVE(entry, next);
+ break;
+ }
+ }
+
+	/* Not found, return */
+ if (valid_entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to find entry");
+ return -EINVAL;
+ }
+
+ /**
+	 * Found it; move it to the free list and try to merge.
+	 * To make merging easier, the list is always sorted by base.
+	 * Find the adjacent prev and next entries.
+ */
+ prev = next = NULL;
+ LIST_FOREACH(entry, &pool->free_list, next) {
+ if (entry->base > valid_entry->base) {
+ next = entry;
+ break;
+ }
+ prev = entry;
+ }
+
+ insert = 0;
+	/* Try to merge with the next entry */
+	if (next != NULL) {
+ if (valid_entry->base + valid_entry->len == next->base) {
+ next->base = valid_entry->base;
+ next->len += valid_entry->len;
+ rte_free(valid_entry);
+ valid_entry = next;
+ insert = 1;
+ }
+ }
+
+ if (prev != NULL) {
+ /* Merge with previous one */
+ if (prev->base + prev->len == valid_entry->base) {
+ prev->len += valid_entry->len;
+			/* If it merged with the next one, remove the next node */
+ if (insert == 1) {
+ LIST_REMOVE(valid_entry, next);
+ rte_free(valid_entry);
+ } else {
+ rte_free(valid_entry);
+ insert = 1;
+ }
+ }
+ }
+
+	/* Did not merge with any entry, insert it */
+ if (insert == 0) {
+ if (prev != NULL)
+ LIST_INSERT_AFTER(prev, valid_entry, next);
+ else if (next != NULL)
+ LIST_INSERT_BEFORE(next, valid_entry, next);
+ else /* It's empty list, insert to head */
+ LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
+ }
+
+ pool->num_free += valid_entry->len;
+ pool->num_alloc -= valid_entry->len;
+
+ return 0;
+}
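+
+/* Illustrative sketch of the merge logic above: freeing the range [4, 6)
+ * into a free list holding [0, 4) and [6, 10) merges in both directions and
+ * collapses the list to a single [0, 10) entry. If only [8, 10) were free,
+ * [4, 6) would instead be inserted unmerged, keeping the list sorted by base.
+ */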
+
+static int
+i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
+ uint16_t num)
+{
+ struct pool_entry *entry, *valid_entry;
+
+ if (pool == NULL || num == 0) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ if (pool->num_free < num) {
+ PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
+ num, pool->num_free);
+ return -ENOMEM;
+ }
+
+ valid_entry = NULL;
+	/* Look up the free list and find the best-fit entry */
+ LIST_FOREACH(entry, &pool->free_list, next) {
+ if (entry->len >= num) {
+			/* An exact fit is the best one */
+ if (entry->len == num) {
+ valid_entry = entry;
+ break;
+ }
+ if (valid_entry == NULL || valid_entry->len > entry->len)
+ valid_entry = entry;
+ }
+ }
+
+	/* No entry found to satisfy the request, return */
+ if (valid_entry == NULL) {
+ PMD_DRV_LOG(ERR, "No valid entry found");
+ return -ENOMEM;
+ }
+ /**
+	 * The entry has exactly the requested number of queues;
+	 * remove it from the free list.
+ */
+ if (valid_entry->len == num) {
+ LIST_REMOVE(valid_entry, next);
+ } else {
+ /**
+		 * The entry has more queues than requested; create a new
+		 * entry for the alloc list, then advance the base and
+		 * shrink the length of the remaining free-list entry.
+ */
+ entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
+ if (entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "resource pool");
+ return -ENOMEM;
+ }
+ entry->base = valid_entry->base;
+ entry->len = num;
+ valid_entry->base += num;
+ valid_entry->len -= num;
+ valid_entry = entry;
+ }
+
+ /* Insert it into alloc list, not sorted */
+ LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
+
+ pool->num_free -= valid_entry->len;
+ pool->num_alloc += valid_entry->len;
+
+ return valid_entry->base + pool->base;
+}
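+
+/* Illustrative sketch of the best-fit policy above: with free entries of
+ * length {8, 3, 5} and a request for num == 3, the exact-fit entry of length
+ * 3 is taken whole; a request for num == 4 instead splits the length-5 entry,
+ * leaving {8, 1} free. The returned value is an absolute ID
+ * (entry base + pool base).
+ */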
+
+/**
+ * bitmap_is_subset - Check whether src2 is a subset of src1
+ **/
+static inline int
+bitmap_is_subset(uint8_t src1, uint8_t src2)
+{
+ return !((src1 ^ src2) & src2);
+}
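+
+/* Illustrative note: !((src1 ^ src2) & src2) is 1 exactly when every bit set
+ * in src2 is also set in src1. E.g. src1 = 0x07, src2 = 0x05 -> 1 (subset);
+ * src1 = 0x07, src2 = 0x09 -> 0, since bit 3 of src2 is missing from src1.
+ */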
+
+static enum i40e_status_code
+validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+
+ /* If DCB is not supported, only default TC is supported */
+ if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
+ PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
+		PMD_DRV_LOG(ERR, "HW supported TC map 0x%x does not cover "
+			    "the requested TC map 0x%x",
+			    hw->func_caps.enabled_tcmap, enabled_tcmap);
+ return I40E_NOT_SUPPORTED;
+ }
+ return I40E_SUCCESS;
+}
+
+int
+i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
+ struct i40e_vsi_vlan_pvid_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ uint8_t vlan_flags = 0;
+ int ret;
+
+ if (vsi == NULL || info == NULL) {
+ PMD_DRV_LOG(ERR, "invalid parameters");
+ return I40E_ERR_PARAM;
+ }
+
+ if (info->on) {
+ vsi->info.pvid = info->config.pvid;
+ /**
+ * If insert pvid is enabled, only tagged pkts are
+ * allowed to be sent out.
+ */
+ vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ } else {
+ vsi->info.pvid = 0;
+ if (info->config.reject.tagged == 0)
+ vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+
+ if (info->config.reject.untagged == 0)
+ vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ }
+ vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_MODE_MASK);
+ vsi->info.port_vlan_flags |= vlan_flags;
+ vsi->info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+
+ return ret;
+}
+
+static int
+i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int i, ret;
+ struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ if (!vsi->seid) {
+ PMD_DRV_LOG(ERR, "seid not valid");
+ return -EINVAL;
+ }
+
+ memset(&tc_bw_data, 0, sizeof(tc_bw_data));
+ tc_bw_data.tc_valid_bits = enabled_tcmap;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ tc_bw_data.tc_bw_credits[i] =
+ (enabled_tcmap & (1 << i)) ? 1 : 0;
+
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure TC BW");
+ return ret;
+ }
+
+ (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
+ sizeof(vsi->info.qs_handle));
+ return I40E_SUCCESS;
+}
+
+static enum i40e_status_code
+i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
+ struct i40e_aqc_vsi_properties_data *info,
+ uint8_t enabled_tcmap)
+{
+ enum i40e_status_code ret;
+ int i, total_tc = 0;
+ uint16_t qpnum_per_tc, bsf, qp_idx;
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ if (enabled_tcmap & (1 << i))
+ total_tc++;
+ vsi->enabled_tc = enabled_tcmap;
+
+ /* Number of queues per enabled TC */
+ qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
+ qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
+ bsf = rte_bsf32(qpnum_per_tc);
+
+ /* Adjust the queue number to actual queues that can be applied */
+ if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
+ vsi->nb_qps = qpnum_per_tc * total_tc;
+
+ /**
+	 * Configure TC and queue mapping parameters: for each enabled TC,
+	 * allocate qpnum_per_tc queues to its traffic; for each disabled
+	 * TC, the default queue serves it.
+ */
+ qp_idx = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ qp_idx += qpnum_per_tc;
+ } else
+ info->tc_mapping[i] = 0;
+ }
+
+ /* Associate queue number with VSI */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->nb_qps; i++)
+ info->queue_mapping[i] =
+ rte_cpu_to_le_16(vsi->base_queue + i);
+ } else {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ }
+ info->valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ return I40E_SUCCESS;
+}
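+
+/* Illustrative sketch: each tc_mapping word packs the first queue of the TC
+ * together with log2 of its queue count. A TC starting at queue 8 with 4
+ * queues (bsf == rte_bsf32(4) == 2) would therefore encode as:
+ *
+ *   info->tc_mapping[i] =
+ *       rte_cpu_to_le_16((8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ *                        (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ */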
+
+static int
+i40e_veb_release(struct i40e_veb *veb)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+
+ if (veb == NULL)
+ return -EINVAL;
+
+ if (!TAILQ_EMPTY(&veb->head)) {
+ PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
+ return -EACCES;
+ }
+ /* associate_vsi field is NULL for floating VEB */
+ if (veb->associate_vsi != NULL) {
+ vsi = veb->associate_vsi;
+ hw = I40E_VSI_TO_HW(vsi);
+
+ vsi->uplink_seid = veb->uplink_seid;
+ vsi->veb = NULL;
+ } else {
+ veb->associate_pf->main_vsi->floating_veb = NULL;
+ hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
+ }
+
+ i40e_aq_delete_element(hw, veb->seid, NULL);
+ rte_free(veb);
+ return I40E_SUCCESS;
+}
+
+/* Setup a veb */
+static struct i40e_veb *
+i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
+{
+ struct i40e_veb *veb;
+ int ret;
+ struct i40e_hw *hw;
+
+ if (pf == NULL) {
+ PMD_DRV_LOG(ERR,
+			    "veb setup failed, associated PF shouldn't be NULL");
+ return NULL;
+ }
+ hw = I40E_PF_TO_HW(pf);
+
+ veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
+ if (!veb) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
+ goto fail;
+ }
+
+ veb->associate_vsi = vsi;
+ veb->associate_pf = pf;
+ TAILQ_INIT(&veb->head);
+ veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
+
+	/* Create a floating VEB if vsi is NULL, otherwise a regular one */
+ if (vsi != NULL) {
+ ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
+ I40E_DEFAULT_TCMAP, false,
+ &veb->seid, false, NULL);
+ } else {
+ ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
+ true, &veb->seid, false, NULL);
+ }
+
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
+ hw->aq.asq_last_status);
+ goto fail;
+ }
+
+ /* get statistics index */
+ ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
+ &veb->stats_idx, NULL, NULL, NULL);
+ if (ret != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
+ hw->aq.asq_last_status);
+ goto fail;
+ }
+ /* Get VEB bandwidth, to be implemented */
+	/* The vsi is now bound to the VEB; set its uplink to this VEB */
+ if (vsi)
+ vsi->uplink_seid = veb->seid;
+
+ return veb;
+fail:
+ rte_free(veb);
+ return NULL;
+}
+
+int
+i40e_vsi_release(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi_list *vsi_list;
+ int ret;
+ struct i40e_mac_filter *f;
+	uint16_t user_param;
+
+	if (!vsi)
+		return I40E_SUCCESS;
+
+	/* Only dereference vsi after the NULL check above */
+	user_param = vsi->user_param;
+	pf = I40E_VSI_TO_PF(vsi);
+ hw = I40E_VSI_TO_HW(vsi);
+
+	/* If the VSI has children attached, release the children first */
+ if (vsi->veb) {
+ TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
+ if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
+ return -1;
+ TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
+ }
+ i40e_veb_release(vsi->veb);
+ }
+
+ if (vsi->floating_veb) {
+ TAILQ_FOREACH(vsi_list, &vsi->floating_veb->head, list) {
+ if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
+ return -1;
+ TAILQ_REMOVE(&vsi->floating_veb->head, vsi_list, list);
+ }
+ }
+
+ /* Remove all macvlan filters of the VSI */
+ i40e_vsi_remove_all_macvlan_filter(vsi);
+ TAILQ_FOREACH(f, &vsi->mac_list, next)
+ rte_free(f);
+
+ if (vsi->type != I40E_VSI_MAIN &&
+ ((vsi->type != I40E_VSI_SRIOV) ||
+ !pf->floating_veb_list[user_param])) {
+ /* Remove vsi from parent's sibling list */
+ if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
+ PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
+ return I40E_ERR_PARAM;
+ }
+ TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
+ &vsi->sib_vsi_list, list);
+
+ /* Remove all switch element of the VSI */
+ ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to delete element");
+ }
+
+ if ((vsi->type == I40E_VSI_SRIOV) &&
+ pf->floating_veb_list[user_param]) {
+ /* Remove vsi from parent's sibling list */
+ if (vsi->parent_vsi == NULL ||
+ vsi->parent_vsi->floating_veb == NULL) {
+ PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
+ return I40E_ERR_PARAM;
+ }
+ TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
+ &vsi->sib_vsi_list, list);
+
+ /* Remove all switch element of the VSI */
+ ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to delete element");
+ }
+
+ i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
+
+ if (vsi->type != I40E_VSI_SRIOV)
+ i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
+ rte_free(vsi);
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_update_default_filter_setting(struct i40e_vsi *vsi)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_remove_macvlan_element_data def_filter;
+ struct i40e_mac_filter_info filter;
+ int ret;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return I40E_ERR_CONFIG;
+ memset(&def_filter, 0, sizeof(def_filter));
+ (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+ def_filter.vlan_tag = 0;
+ def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
+ if (ret != I40E_SUCCESS) {
+ struct i40e_mac_filter *f;
+ struct ether_addr *mac;
+
+ PMD_DRV_LOG(WARNING, "Cannot remove the default "
+ "macvlan filter");
+		/* The permanent MAC needs to be added to the MAC list */
+ f = rte_zmalloc("macv_filter", sizeof(*f), 0);
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+ mac = &f->mac_info.mac_addr;
+ (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+ f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
+ vsi->mac_num++;
+
+ return ret;
+ }
+ (void)rte_memcpy(&filter.mac_addr,
+ (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ return i40e_vsi_add_mac(vsi, &filter);
+}
+
+/*
+ * i40e_vsi_get_bw_config - Query VSI BW Information
+ * @vsi: the VSI to be queried
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
+{
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config;
+ struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
+ struct i40e_hw *hw = &vsi->adapter->hw;
+ i40e_status ret;
+ int i;
+ uint32_t bw_max;
+
+ memset(&bw_config, 0, sizeof(bw_config));
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+
+ memset(&ets_sla_config, 0, sizeof(ets_sla_config));
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
+ &ets_sla_config, NULL);
+ if (ret != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
+ "configuration %u", hw->aq.asq_last_status);
+ return ret;
+ }
+
+ /* store and print out BW info */
+ vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
+ vsi->bw_info.bw_max = bw_config.max_bw;
+ PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
+ PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
+ bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
+ (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
+ I40E_16_BIT_WIDTH);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ vsi->bw_info.bw_ets_share_credits[i] =
+ ets_sla_config.share_credits[i];
+ vsi->bw_info.bw_ets_credits[i] =
+ rte_le_to_cpu_16(ets_sla_config.credits[i]);
+ /* 4 bits per TC, 4th bit is reserved */
+ vsi->bw_info.bw_ets_max[i] =
+ (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
+ RTE_LEN2MASK(3, uint8_t));
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
+ vsi->bw_info.bw_ets_share_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
+ vsi->bw_info.bw_ets_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
+ vsi->bw_info.bw_ets_max[i]);
+ }
+
+ return I40E_SUCCESS;
+}
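+
+/* Illustrative note: tc_bw_max packs one 4-bit max-credit field per TC (the
+ * 4th bit of each field is reserved). For bw_max == 0x00005321, TC0..TC3
+ * decode to 1, 2, 3 and 5 via
+ * (bw_max >> (i * I40E_4_BIT_WIDTH)) & RTE_LEN2MASK(3, uint8_t).
+ */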
+
+/* i40e_enable_pf_lb
+ * @pf: pointer to the pf structure
+ *
+ * allow loopback on pf
+ */
+static inline void
+i40e_enable_pf_lb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+	/* The loopback flag can only be set with FW API >= v5.0 */
+ if (hw->aq.fw_maj_ver < 5) {
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ return;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = hw->pf_id;
+ ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
+ ret, hw->aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret)
+ PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
+ hw->aq.asq_last_status);
+}
+
+/* Setup a VSI */
+struct i40e_vsi *
+i40e_vsi_setup(struct i40e_pf *pf,
+ enum i40e_vsi_type type,
+ struct i40e_vsi *uplink_vsi,
+ uint16_t user_param)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_mac_filter_info filter;
+ int ret;
+ struct i40e_vsi_context ctxt;
+ struct ether_addr broadcast =
+ {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
+
+ if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
+ uplink_vsi == NULL) {
+ PMD_DRV_LOG(ERR, "VSI setup failed, "
+			    "uplink VSI shouldn't be NULL");
+ return NULL;
+ }
+
+ if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
+ PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
+ "uplink VSI should be NULL");
+ return NULL;
+ }
+
+	/* Two situations:
+	 * 1. type is not MAIN and the uplink vsi is not NULL:
+	 *    if the uplink vsi has not set up a VEB yet, create one first
+	 *    under its veb field.
+	 * 2. type is SRIOV and the uplink is NULL:
+	 *    if the floating VEB is NULL, create one under the floating
+	 *    veb field.
+	 */
+
+ if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
+ uplink_vsi->veb == NULL) {
+ uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
+
+ if (uplink_vsi->veb == NULL) {
+ PMD_DRV_LOG(ERR, "VEB setup failed");
+ return NULL;
+ }
+		/* Set ALLOW_LOOPBACK on the PF when the VEB is created */
+ i40e_enable_pf_lb(pf);
+ }
+
+ if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
+ pf->main_vsi->floating_veb == NULL) {
+ pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
+
+ if (pf->main_vsi->floating_veb == NULL) {
+ PMD_DRV_LOG(ERR, "VEB setup failed");
+ return NULL;
+ }
+ }
+
+ vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
+ return NULL;
+ }
+ TAILQ_INIT(&vsi->mac_list);
+ vsi->type = type;
+ vsi->adapter = I40E_PF_TO_ADAPTER(pf);
+ vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
+ vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
+ vsi->user_param = user_param;
+ /* Allocate queues */
+ switch (vsi->type) {
+	case I40E_VSI_MAIN:
+ vsi->nb_qps = pf->lan_nb_qps;
+ break;
+	case I40E_VSI_SRIOV:
+ vsi->nb_qps = pf->vf_nb_qps;
+ break;
+ case I40E_VSI_VMDQ2:
+ vsi->nb_qps = pf->vmdq_nb_qps;
+ break;
+ case I40E_VSI_FDIR:
+ vsi->nb_qps = pf->fdir_nb_qps;
+ break;
+ default:
+ goto fail_mem;
+ }
+	/*
+	 * The filter status descriptor is reported on rx queue 0,
+	 * while the tx queue for fdir filter programming has no such
+	 * constraint and can be any queue.
+	 * To keep it simple, the FDIR vsi uses queue pair 0.
+	 * To make sure it gets queue pair 0, queue allocation must be
+	 * done before this function is called.
+	 */
+ if (type != I40E_VSI_FDIR) {
+ ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
+ vsi->seid, ret);
+ goto fail_mem;
+ }
+ vsi->base_queue = ret;
+ } else
+ vsi->base_queue = I40E_FDIR_QUEUE_ID;
+
+ /* VF has MSIX interrupt in VF range, don't allocate here */
+ if (type == I40E_VSI_MAIN) {
+ ret = i40e_res_pool_alloc(&pf->msix_pool,
+ RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
+ vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
+ } else if (type != I40E_VSI_SRIOV) {
+ ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = 1;
+ } else {
+ vsi->msix_intr = 0;
+ vsi->nb_msix = 0;
+ }
+
+ /* Add VSI */
+ if (type == I40E_VSI_MAIN) {
+		/* For the main VSI, no need to add it since it's the default */
+ vsi->uplink_seid = pf->mac_seid;
+ vsi->seid = pf->main_vsi_seid;
+ /* Bind queues with specific MSIX interrupt */
+		/**
+		 * At least two interrupts are needed: one for the misc
+		 * cause, enabled from the OS side, and another for the
+		 * queues, bound to the interrupt from the device side only.
+		 */
+
+ /* Get default VSI parameters from hardware */
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get VSI params");
+ goto fail_msix_alloc;
+ }
+ (void)rte_memcpy(&vsi->info, &ctxt.info,
+ sizeof(struct i40e_aqc_vsi_properties_data));
+ vsi->vsi_id = ctxt.vsi_number;
+ vsi->info.valid_sections = 0;
+
+		/* Configure TCs; only TC0 is enabled */
+ if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
+ I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
+ goto fail_msix_alloc;
+ }
+
+ /* TC, queue mapping */
+ memset(&ctxt, 0, sizeof(ctxt));
+ vsi->info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ (void)rte_memcpy(&ctxt.info, &vsi->info,
+ sizeof(struct i40e_aqc_vsi_properties_data));
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping");
+ goto fail_msix_alloc;
+ }
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.vf_num = 0;
+
+ /* Update VSI parameters */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ goto fail_msix_alloc;
+ }
+
+ (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ (void)rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+
+ /**
+		 * Updating the default filter settings is necessary to
+		 * prevent reception of tagged packets.
+		 * Some old firmware configurations load a default macvlan
+		 * filter which accepts both tagged and untagged packets.
+		 * The update replaces it with a normal filter if needed.
+		 * For NVM 4.2.2 or later, the update is no longer needed.
+		 * Firmware with the correct configuration loads the expected
+		 * default macvlan filter, which cannot be removed.
+ */
+ i40e_update_default_filter_setting(vsi);
+ i40e_config_qinq(hw, vsi);
+ } else if (type == I40E_VSI_SRIOV) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ /**
+		 * For other VSIs, the uplink_seid equals the uplink VSI's
+		 * uplink_seid since they share the same VEB
+ */
+ if (uplink_vsi == NULL)
+ vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
+ else
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+ /* Use the VEB configuration if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver >= 5) {
+ /* Configure switch ID */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ /* Configure port/vlan */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ /**
+		 * Since the VSI is not created yet, only configure its
+		 * parameters; the vsi is added below.
+ */
+
+ i40e_config_qinq(hw, vsi);
+ } else if (type == I40E_VSI_VMDQ2) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ /*
+		 * For other VSIs, the uplink_seid equals the uplink VSI's
+		 * uplink_seid since they share the same VEB
+ */
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+		/* user_param carries the flag to enable loopback */
+ if (user_param) {
+ ctxt.info.switch_id =
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ctxt.info.switch_id |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ /* Configure port/vlan */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ } else if (type == I40E_VSI_FDIR) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping.");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ } else {
+		PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
+ goto fail_msix_alloc;
+ }
+
+ if (vsi->type != I40E_VSI_MAIN) {
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
+ hw->aq.asq_last_status);
+ goto fail_msix_alloc;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+ vsi->seid = ctxt.seid;
+ vsi->vsi_id = ctxt.vsi_number;
+ vsi->sib_vsi_list.vsi = vsi;
+ if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
+ TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
+ &vsi->sib_vsi_list, list);
+ } else {
+ TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
+ &vsi->sib_vsi_list, list);
+ }
+ }
+
+ /* MAC/VLAN configuration */
+ (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+
+ ret = i40e_vsi_add_mac(vsi, &filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
+ goto fail_msix_alloc;
+ }
+
+ /* Get VSI BW information */
+ i40e_vsi_get_bw_config(vsi);
+ return vsi;
+fail_msix_alloc:
+	i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
+fail_queue_alloc:
+	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
+fail_mem:
+ rte_free(vsi);
+ return NULL;
+}
+
+/* Configure vlan filter on or off */
+int
+i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
+{
+ int i, num;
+ struct i40e_mac_filter *f;
+ struct i40e_mac_filter_info *mac_filter;
+ enum rte_mac_filter_type desired_filter;
+ int ret = I40E_SUCCESS;
+
+ if (on) {
+ /* Filter to match MAC and VLAN */
+ desired_filter = RTE_MACVLAN_PERFECT_MATCH;
+ } else {
+ /* Filter to match only MAC */
+ desired_filter = RTE_MAC_PERFECT_MATCH;
+ }
+
+ num = vsi->mac_num;
+
+ mac_filter = rte_zmalloc("mac_filter_info_data",
+ num * sizeof(*mac_filter), 0);
+ if (mac_filter == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ i = 0;
+
+	/* Remove all existing MAC filters */
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ mac_filter[i] = f->mac_info;
+ ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
+ on ? "enable" : "disable");
+ goto DONE;
+ }
+ i++;
+ }
+
+	/* Re-add the filters with the new filter type */
+ for (i = 0; i < num; i++) {
+ mac_filter[i].filter_type = desired_filter;
+ ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
+ on ? "enable" : "disable");
+ goto DONE;
+ }
+ }
+
+DONE:
+ rte_free(mac_filter);
+ return ret;
+}
+
+/* Configure vlan stripping on or off */
+int
+i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_vsi_context ctxt;
+ uint8_t vlan_flags;
+ int ret = I40E_SUCCESS;
+
+	/* Check if it is already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
+ if (on) {
+ if ((vsi->info.port_vlan_flags &
+ I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.port_vlan_flags &
+ I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+ I40E_AQ_VSI_PVLAN_EMOD_MASK)
+ return 0; /* already off */
+ }
+ }
+
+ if (on)
+ vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ else
+ vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+ vsi->info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
+ vsi->info.port_vlan_flags |= vlan_flags;
+ ctxt.seid = vsi->seid;
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret)
+ PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
+ on ? "enable" : "disable");
+
+ return ret;
+}
+
+static int
+i40e_dev_init_vlan(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ int ret;
+ int mask = 0;
+
+ /* Apply vlan offload setting */
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+ i40e_vlan_offload_set(dev, mask);
+
+ /* Apply double-vlan setting, not implemented yet */
+
+ /* Apply pvid setting */
+ ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
+ data->dev_conf.txmode.hw_vlan_insert_pvid);
+ if (ret)
+ PMD_DRV_LOG(INFO, "Failed to update VSI params");
+
+ return ret;
+}
+
+static int
+i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+
+ return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
+}
+
+static int
+i40e_update_flow_control(struct i40e_hw *hw)
+{
+#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
+ struct i40e_link_status link_status;
+ uint32_t rxfc = 0, txfc = 0, reg;
+ uint8_t an_info;
+ int ret;
+
+ memset(&link_status, 0, sizeof(link_status));
+ ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get link status information");
+ goto write_reg; /* Disable flow control */
+ }
+
+ an_info = hw->phy.link_info.an_info;
+ if (!(an_info & I40E_AQ_AN_COMPLETED)) {
+ PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
+ ret = I40E_ERR_NOT_READY;
+ goto write_reg; /* Disable flow control */
+ }
+ /**
+ * If link auto negotiation is enabled, flow control needs to
+ * be configured according to it
+ */
+ switch (an_info & I40E_LINK_PAUSE_RXTX) {
+ case I40E_LINK_PAUSE_RXTX:
+ rxfc = 1;
+ txfc = 1;
+ hw->fc.current_mode = I40E_FC_FULL;
+ break;
+ case I40E_AQ_LINK_PAUSE_RX:
+ rxfc = 1;
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ break;
+ case I40E_AQ_LINK_PAUSE_TX:
+ txfc = 1;
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ break;
+ default:
+ hw->fc.current_mode = I40E_FC_NONE;
+ break;
+ }
+
+write_reg:
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
+ txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+ reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
+ reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
+ I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
+
+ return ret;
+}
+
+/* PF setup */
+static int
+i40e_pf_setup(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_filter_control_settings settings;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ /* Clear all stats counters */
+ pf->offset_loaded = FALSE;
+ memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
+ memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
+
+ ret = i40e_pf_get_switch_config(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
+ return ret;
+ }
+ if (pf->flags & I40E_FLAG_FDIR) {
+		/* Make queue allocation happen first, so FDIR uses queue pair 0 */
+ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
+ if (ret != I40E_FDIR_QUEUE_ID) {
+ PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
+ " ret =%d", ret);
+ pf->flags &= ~I40E_FLAG_FDIR;
+ }
+ }
+ /* main VSI setup */
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Setup of main vsi failed");
+ return I40E_ERR_NOT_READY;
+ }
+ pf->main_vsi = vsi;
+
+ /* Configure filter control */
+ memset(&settings, 0, sizeof(settings));
+ if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+ settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+ else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+ settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ else {
+ PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
+ hw->func_caps.rss_table_size);
+ return I40E_ERR_PARAM;
+ }
+ PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
+ "size: %u\n", hw->func_caps.rss_table_size);
+ pf->hash_lut_size = hw->func_caps.rss_table_size;
+
+ /* Enable ethtype and macvlan filters */
+ settings.enable_ethtype = TRUE;
+ settings.enable_macvlan = TRUE;
+ ret = i40e_set_filter_control(hw, &settings);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
+ ret);
+
+ /* Update flow control according to the auto negotiation */
+ i40e_update_flow_control(hw);
+
+ return I40E_SUCCESS;
+}
+
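+/**
+ * Enable or disable one tx queue: request the new state through the
+ * QENA_REQ bit, then poll QENA_STAT until hardware confirms it.
+ */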
+int
+i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
+{
+ uint32_t reg;
+ uint16_t j;
+
+ /**
+ * Set or clear TX Queue Disable flags,
+ * which is required by hardware.
+ */
+ i40e_pre_tx_queue_cfg(hw, q_idx, on);
+ rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
+
+ /* Wait until the request is finished */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
+ if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
+ ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
+ & 0x1))) {
+ break;
+ }
+ }
+ if (on) {
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+ return I40E_SUCCESS; /* already on, skip next steps */
+
+ I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ } else {
+ if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ return I40E_SUCCESS; /* already off, skip next steps */
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
+ /* Write the register */
+ I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
+ /* Check the result */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
+ if (on) {
+ if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
+ (reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
+ !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ }
+ }
+ /* Check if it is timeout */
+ if (j >= I40E_CHK_Q_ENA_COUNT) {
+ PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
+ (on ? "enable" : "disable"), q_idx);
+ return I40E_ERR_TIMEOUT;
+ }
+
+ return I40E_SUCCESS;
+}
+
+/* Switch on or off the tx queues */
+static int
+i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
+{
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ struct i40e_tx_queue *txq;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < dev_data->nb_tx_queues; i++) {
+ txq = dev_data->tx_queues[i];
+		/* Don't operate the queue if it is not configured, or
+		 * if it is flagged for deferred start when starting all queues */
+ if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
+ continue;
+ if (on)
+ ret = i40e_dev_tx_queue_start(dev, i);
+ else
+ ret = i40e_dev_tx_queue_stop(dev, i);
+		if (ret != I40E_SUCCESS)
+ return ret;
+ }
+
+ return I40E_SUCCESS;
+}
+
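+/**
+ * Enable or disable one rx queue, using the same QENA_REQ/QENA_STAT
+ * request-and-poll handshake as the tx queue switch above.
+ */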
+int
+i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
+{
+ uint32_t reg;
+ uint16_t j;
+
+ /* Wait until the request is finished */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
+ if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
+ ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
+ break;
+ }
+
+ if (on) {
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+ return I40E_SUCCESS; /* Already on, skip next steps */
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ } else {
+ if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ return I40E_SUCCESS; /* Already off, skip next steps */
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ }
+
+ /* Write the register */
+ I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
+ /* Check the result */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
+ if (on) {
+ if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
+ (reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
+ !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ }
+ }
+
+ /* Check if it is timeout */
+ if (j >= I40E_CHK_Q_ENA_COUNT) {
+ PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
+ (on ? "enable" : "disable"), q_idx);
+ return I40E_ERR_TIMEOUT;
+ }
+
+ return I40E_SUCCESS;
+}
+
+/* Switch on or off the rx queues */
+static int
+i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
+{
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ struct i40e_rx_queue *rxq;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < dev_data->nb_rx_queues; i++) {
+ rxq = dev_data->rx_queues[i];
+		/* Don't operate the queue if it is not configured, or
+		 * if it is flagged for deferred start when starting all queues */
+ if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
+ continue;
+ if (on)
+ ret = i40e_dev_rx_queue_start(dev, i);
+ else
+ ret = i40e_dev_rx_queue_stop(dev, i);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+
+ return I40E_SUCCESS;
+}
+
+/* Switch on or off all the rx/tx queues */
+int
+i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
+{
+ int ret;
+
+ if (on) {
+ /* enable rx queues before enabling tx queues */
+ ret = i40e_dev_switch_rx_queues(pf, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to switch rx queues");
+ return ret;
+ }
+ ret = i40e_dev_switch_tx_queues(pf, on);
+ } else {
+ /* Stop tx queues before stopping rx queues */
+ ret = i40e_dev_switch_tx_queues(pf, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to switch tx queues");
+ return ret;
+ }
+ ret = i40e_dev_switch_rx_queues(pf, on);
+ }
+
+ return ret;
+}
+
+/* Initialize VSI for TX */
+static int
+i40e_dev_tx_init(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ uint16_t i;
+ uint32_t ret = I40E_SUCCESS;
+ struct i40e_tx_queue *txq;
+
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ txq = data->tx_queues[i];
+ if (!txq || !txq->q_set)
+ continue;
+ ret = i40e_tx_queue_init(txq);
+ if (ret != I40E_SUCCESS)
+ break;
+ }
+ if (ret == I40E_SUCCESS)
+ i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
+
+ return ret;
+}
+
+/* Initialize VSI for RX */
+static int
+i40e_dev_rx_init(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ int ret = I40E_SUCCESS;
+ uint16_t i;
+ struct i40e_rx_queue *rxq;
+
+ i40e_pf_config_mq_rx(pf);
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ rxq = data->rx_queues[i];
+ if (!rxq || !rxq->q_set)
+ continue;
+
+ ret = i40e_rx_queue_init(rxq);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to do RX queue "
+ "initialization");
+ break;
+ }
+ }
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
+
+ return ret;
+}
+
+static int
+i40e_dev_rxtx_init(struct i40e_pf *pf)
+{
+ int err;
+
+ err = i40e_dev_tx_init(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do TX initialization");
+ return err;
+ }
+ err = i40e_dev_rx_init(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do RX initialization");
+ return err;
+ }
+
+ return err;
+}
+
+static int
+i40e_vmdq_setup(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int i, err, conf_vsis, j, loop;
+ struct i40e_vsi *vsi;
+ struct i40e_vmdq_info *vmdq_info;
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+	/*
+	 * Disable interrupts to avoid messages from VFs. Furthermore, this
+	 * avoids race conditions during VSI creation/destruction.
+	 */
+ i40e_pf_disable_irq0(hw);
+
+ if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
+ PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
+ return -ENOTSUP;
+ }
+
+ conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
+ if (conf_vsis > pf->max_nb_vmdq_vsi) {
+ PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
+ conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
+ pf->max_nb_vmdq_vsi);
+ return -ENOTSUP;
+ }
+
+ if (pf->vmdq != NULL) {
+ PMD_INIT_LOG(INFO, "VMDQ already configured");
+ return 0;
+ }
+
+ pf->vmdq = rte_zmalloc("vmdq_info_struct",
+ sizeof(*vmdq_info) * conf_vsis, 0);
+
+ if (pf->vmdq == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory");
+ return -ENOMEM;
+ }
+
+ vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
+
+ /* Create VMDQ VSI */
+ for (i = 0; i < conf_vsis; i++) {
+ vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
+ vmdq_conf->enable_loop_back);
+ if (vsi == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
+ err = -1;
+ goto err_vsi_setup;
+ }
+ vmdq_info = &pf->vmdq[i];
+ vmdq_info->pf = pf;
+ vmdq_info->vsi = vsi;
+ }
+ pf->nb_cfg_vmdq_vsi = conf_vsis;
+
+ /* Configure Vlan */
+ loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
+ if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
+ PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
+ vmdq_conf->pool_map[i].vlan_id, j);
+
+ err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
+ vmdq_conf->pool_map[i].vlan_id);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to add vlan");
+ err = -1;
+ goto err_vsi_setup;
+ }
+ }
+ }
+ }
+
+ i40e_pf_enable_irq0(hw);
+
+ return 0;
+
+err_vsi_setup:
+ for (i = 0; i < conf_vsis; i++)
+ if (pf->vmdq[i].vsi == NULL)
+ break;
+ else
+ i40e_vsi_release(pf->vmdq[i].vsi);
+
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
+ i40e_pf_enable_irq0(hw);
+ return err;
+}
+
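+/**
+ * Read a 32-bit statistics register and report it as a delta against a
+ * previously captured offset, compensating for counter wraparound.
+ */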
+static void
+i40e_stat_update_32(struct i40e_hw *hw,
+ uint32_t reg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat)
+{
+ uint64_t new_data;
+
+ new_data = (uint64_t)I40E_READ_REG(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+
+ if (new_data >= *offset)
+ *stat = (uint64_t)(new_data - *offset);
+ else
+ *stat = (uint64_t)((new_data +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
+
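+/**
+ * Read a 48-bit statistics counter split across a low 32-bit register and
+ * the low 16 bits of a high register, and report it as a wrap-safe delta.
+ */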
+static void
+i40e_stat_update_48(struct i40e_hw *hw,
+ uint32_t hireg,
+ uint32_t loreg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat)
+{
+ uint64_t new_data;
+
+ new_data = (uint64_t)I40E_READ_REG(hw, loreg);
+ new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
+ I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
+
+ if (!offset_loaded)
+ *offset = new_data;
+
+ if (new_data >= *offset)
+ *stat = new_data - *offset;
+ else
+ *stat = (uint64_t)((new_data +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+ *stat &= I40E_48_BIT_MASK;
+}
+
+/* Disable IRQ0 */
+void
+i40e_pf_disable_irq0(struct i40e_hw *hw)
+{
+ /* Disable all interrupt types */
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_FLUSH(hw);
+}
+
+/* Enable IRQ0 */
+void
+i40e_pf_enable_irq0(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
+{
+ /* read pending request and disable first */
+ i40e_pf_disable_irq0(hw);
+ I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
+ I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
+ I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
+
+ if (no_queue)
+ /* Link no queues with irq0 */
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+}
+
+static void
+i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int i;
+ uint16_t abs_vf_id;
+ uint32_t index, offset, val;
+
+ if (!pf->vfs)
+ return;
+	/**
+	 * Try to find which VF triggered a reset. Use the absolute VF id to
+	 * access, since the register is a global register.
+	 */
+ for (i = 0; i < pf->vf_num; i++) {
+ abs_vf_id = hw->func_caps.vf_base_id + i;
+ index = abs_vf_id / I40E_UINT32_BIT_SIZE;
+ offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
+ val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
+		/* VFR event occurred */
+ if (val & (0x1 << offset)) {
+ int ret;
+
+ /* Clear the event first */
+ I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
+ (0x1 << offset));
+ PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
+ /**
+ * Only notify a VF reset event occured,
+ * don't trigger another SW reset
+ */
+ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to do VF reset");
+ }
+ }
+}
+
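+/* Drain the admin receive queue and dispatch each event to its handler */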
+static void
+i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_arq_event_info info;
+ uint16_t pending, opcode;
+ int ret;
+
+ info.buf_len = I40E_AQ_BUF_SZ;
+ info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
+ if (!info.msg_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mem");
+ return;
+ }
+
+ pending = 1;
+ while (pending) {
+ ret = i40e_clean_arq_element(hw, &info, &pending);
+
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
+ "aq_err: %u", hw->aq.asq_last_status);
+ break;
+ }
+ opcode = rte_le_to_cpu_16(info.desc.opcode);
+
+ switch (opcode) {
+ case i40e_aqc_opc_send_msg_to_pf:
+			/* Refer to i40e_aq_send_msg_to_pf() for argument layout */
+ i40e_pf_host_handle_vf_msg(dev,
+ rte_le_to_cpu_16(info.desc.retval),
+ rte_le_to_cpu_32(info.desc.cookie_high),
+ rte_le_to_cpu_32(info.desc.cookie_low),
+ info.msg_buf,
+ info.msg_len);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ opcode);
+ break;
+ }
+ }
+ rte_free(info.msg_buf);
+}
+
+/*
+ * The interrupt handler is registered as the alarm callback for handling
+ * the LSC interrupt after a fixed delay, in order to wait for the NIC to
+ * reach a stable state. Currently i40e waits 1 sec for the link up
+ * interrupt; there is no need to wait for the link down interrupt.
+ */
+static void
+i40e_dev_interrupt_delayed_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ /* read interrupt causes again */
+ icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
+ if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
+ if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
+ "state\n");
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
+ if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
+ i40e_dev_handle_vfr_event(dev);
+ }
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
+ i40e_dev_handle_aq_msg(dev);
+ }
+
+ /* handle the link up interrupt in an alarm callback */
+ i40e_dev_link_update(dev, 0);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+
+ i40e_pf_enable_irq0(hw);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ /* Disable interrupt */
+ i40e_pf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+ /* No interrupt event indicated */
+ if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
+ PMD_DRV_LOG(INFO, "No interrupt event");
+ goto done;
+ }
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: global reset requested");
+ if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
+ if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: HMC error");
+ if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
+ i40e_dev_handle_vfr_event(dev);
+ }
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: adminq event");
+ i40e_dev_handle_aq_msg(dev);
+ }
+
+ /* Link Status Change interrupt */
+ if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
+#define I40E_US_PER_SECOND 1000000
+ struct rte_eth_link link;
+
+ PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
+ memset(&link, 0, sizeof(link));
+ rte_i40e_dev_atomic_read_link_status(dev, &link);
+ i40e_dev_link_update(dev, 0);
+
+		/*
+		 * For a link up interrupt, wait 1 second to let the hardware
+		 * reach a stable state; otherwise several consecutive
+		 * interrupts can be observed.
+		 * For a link down interrupt, there is no need to wait.
+		 */
+ if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
+ i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
+ return;
+ else
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC);
+ }
+
+done:
+ /* Enable interrupt */
+ i40e_pf_enable_irq0(hw);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
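+/**
+ * Add a list of mac/vlan filters, splitting the request into chunks that
+ * fit within a single admin queue buffer.
+ */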
+static int
+i40e_add_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total)
+{
+ int ele_num, ele_buff_size;
+ int num, actual_num, i;
+ uint16_t flags;
+ int ret = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_macvlan_element_data *req_list;
+
+ if (filter == NULL || total == 0)
+ return I40E_ERR_PARAM;
+ ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
+ ele_buff_size = hw->aq.asq_buf_size;
+
+ req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
+ if (req_list == NULL) {
+ PMD_DRV_LOG(ERR, "Fail to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ num = 0;
+ do {
+ actual_num = (num + ele_num > total) ? (total - num) : ele_num;
+ memset(req_list, 0, ele_buff_size);
+
+ for (i = 0; i < actual_num; i++) {
+ (void)rte_memcpy(req_list[i].mac_addr,
+ &filter[num + i].macaddr, ETH_ADDR_LEN);
+ req_list[i].vlan_tag =
+ rte_cpu_to_le_16(filter[num + i].vlan_id);
+
+ switch (filter[num + i].filter_type) {
+ case RTE_MAC_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ break;
+ case RTE_MAC_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+ ret = I40E_ERR_PARAM;
+ goto DONE;
+ }
+
+ req_list[i].queue_number = 0;
+
+ req_list[i].flags = rte_cpu_to_le_16(flags);
+ }
+
+ ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
+ actual_num, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
+ goto DONE;
+ }
+ num += actual_num;
+ } while (num < total);
+
+DONE:
+ rte_free(req_list);
+ return ret;
+}
+
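+/**
+ * Remove a list of mac/vlan filters, chunked to the admin queue buffer
+ * size like the add path above.
+ */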
+static int
+i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total)
+{
+ int ele_num, ele_buff_size;
+ int num, actual_num, i;
+ uint16_t flags;
+ int ret = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_remove_macvlan_element_data *req_list;
+
+ if (filter == NULL || total == 0)
+ return I40E_ERR_PARAM;
+
+ ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
+ ele_buff_size = hw->aq.asq_buf_size;
+
+ req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
+ if (req_list == NULL) {
+ PMD_DRV_LOG(ERR, "Fail to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ num = 0;
+ do {
+ actual_num = (num + ele_num > total) ? (total - num) : ele_num;
+ memset(req_list, 0, ele_buff_size);
+
+ for (i = 0; i < actual_num; i++) {
+ (void)rte_memcpy(req_list[i].mac_addr,
+ &filter[num + i].macaddr, ETH_ADDR_LEN);
+ req_list[i].vlan_tag =
+ rte_cpu_to_le_16(filter[num + i].vlan_id);
+
+ switch (filter[num + i].filter_type) {
+ case RTE_MAC_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ break;
+ case RTE_MAC_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+ ret = I40E_ERR_PARAM;
+ goto DONE;
+ }
+ req_list[i].flags = rte_cpu_to_le_16(flags);
+ }
+
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
+ actual_num, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
+ goto DONE;
+ }
+ num += actual_num;
+ } while (num < total);
+
+DONE:
+ rte_free(req_list);
+ return ret;
+}
+
+/* Find out specific MAC filter */
+static struct i40e_mac_filter *
+i40e_find_mac_filter(struct i40e_vsi *vsi,
+ struct ether_addr *macaddr)
+{
+ struct i40e_mac_filter *f;
+
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
+ return f;
+ }
+
+ return NULL;
+}
+
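+/* The VFTA is a bitmap with one bit per vlan id; check whether it is set */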
+static bool
+i40e_find_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id)
+{
+ uint32_t vid_idx, vid_bit;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return 0;
+
+ vid_idx = I40E_VFTA_IDX(vlan_id);
+ vid_bit = I40E_VFTA_BIT(vlan_id);
+
+ if (vsi->vfta[vid_idx] & vid_bit)
+ return 1;
+ else
+ return 0;
+}
+
+static void
+i40e_set_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
+{
+ uint32_t vid_idx, vid_bit;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return;
+
+ vid_idx = I40E_VFTA_IDX(vlan_id);
+ vid_bit = I40E_VFTA_BIT(vlan_id);
+
+ if (on)
+ vsi->vfta[vid_idx] |= vid_bit;
+ else
+ vsi->vfta[vid_idx] &= ~vid_bit;
+}
+
+/**
+ * Find all vlan options for specific mac addr,
+ * return with actual vlan found.
+ */
+static inline int
+i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num, struct ether_addr *addr)
+{
+ int i;
+ uint32_t j, k;
+
+ /**
+ * Not to use i40e_find_vlan_filter to decrease the loop time,
+ * although the code looks complex.
+ */
+ if (num < vsi->vlan_num)
+ return I40E_ERR_PARAM;
+
+ i = 0;
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (vsi->vfta[j]) {
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (vsi->vfta[j] & (1 << k)) {
+ if (i > num - 1) {
+ PMD_DRV_LOG(ERR, "vlan number "
+ "not match");
+ return I40E_ERR_PARAM;
+ }
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ addr, ETH_ADDR_LEN);
+ mv_f[i].vlan_id =
+ j * I40E_UINT32_BIT_SIZE + k;
+ i++;
+ }
+ }
+ }
+ }
+ return I40E_SUCCESS;
+}
+
+static inline int
+i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num,
+ uint16_t vlan)
+{
+ int i = 0;
+ struct i40e_mac_filter *f;
+
+ if (num < vsi->mac_num)
+ return I40E_ERR_PARAM;
+
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (i > num - 1) {
+ PMD_DRV_LOG(ERR, "buffer number not match");
+ return I40E_ERR_PARAM;
+ }
+ (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ mv_f[i].vlan_id = vlan;
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ i++;
+ }
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
+{
+ int i, num;
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int ret = I40E_SUCCESS;
+
+ if (vsi == NULL || vsi->mac_num == 0)
+ return I40E_ERR_PARAM;
+
+ /* Case that no vlan is set */
+ if (vsi->vlan_num == 0)
+ num = vsi->mac_num;
+ else
+ num = vsi->mac_num * vsi->vlan_num;
+
+ mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ i = 0;
+ if (vsi->vlan_num == 0) {
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr, ETH_ADDR_LEN);
+ mv_f[i].vlan_id = 0;
+ i++;
+ }
+ } else {
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
+ vsi->vlan_num, &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ i += vsi->vlan_num;
+ }
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
+DONE:
+ rte_free(mv_f);
+
+ return ret;
+}
+
+int
+i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
+{
+ struct i40e_macvlan_filter *mv_f;
+ int mac_num;
+ int ret = I40E_SUCCESS;
+
+ if (!vsi || vlan > ETHER_MAX_VLAN_ID)
+ return I40E_ERR_PARAM;
+
+ /* If it's already set, just return */
+	if (i40e_find_vlan_filter(vsi, vlan))
+ return I40E_SUCCESS;
+
+ mac_num = vsi->mac_num;
+
+ if (mac_num == 0) {
+ PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
+ return I40E_ERR_PARAM;
+ }
+
+ mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
+
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ i40e_set_vlan_filter(vsi, vlan, 1);
+
+ vsi->vlan_num++;
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
+int
+i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
+{
+ struct i40e_macvlan_filter *mv_f;
+ int mac_num;
+ int ret = I40E_SUCCESS;
+
+ /**
+ * Vlan 0 is the generic filter for untagged packets
+ * and can't be removed.
+ */
+ if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
+ return I40E_ERR_PARAM;
+
+	/* If it can't be found, just return */
+ if (!i40e_find_vlan_filter(vsi, vlan))
+ return I40E_ERR_PARAM;
+
+ mac_num = vsi->mac_num;
+
+ if (mac_num == 0) {
+ PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
+ return I40E_ERR_PARAM;
+ }
+
+ mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
+
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ /* This is last vlan to remove, replace all mac filter with vlan 0 */
+ if (vsi->vlan_num == 1) {
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ i40e_set_vlan_filter(vsi, vlan, 0);
+
+ vsi->vlan_num--;
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
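+/**
+ * Add a mac filter to the VSI. For vlan-qualified filter types, the filter
+ * is expanded into one mac/vlan entry per vlan currently set on the VSI.
+ */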
+int
+i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num = 0;
+ int ret = I40E_SUCCESS;
+
+	/* If the mac filter has already been configured, just return */
+ f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
+ if (f != NULL)
+ return I40E_SUCCESS;
+ if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+ (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
+
+ /**
+ * If vlan_num is 0, that's the first time to add mac,
+ * set mask for vlan_id 0.
+ */
+ if (vsi->vlan_num == 0) {
+ i40e_set_vlan_filter(vsi, 0, 1);
+ vsi->vlan_num = 1;
+ }
+ vlan_num = vsi->vlan_num;
+ } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
+ (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = mac_filter->filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
+ ETH_ADDR_LEN);
+ }
+
+ if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &mac_filter->mac_addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ /* Add the mac addr into mac list */
+ f = rte_zmalloc("macv_filter", sizeof(*f), 0);
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ ret = I40E_ERR_NO_MEMORY;
+ goto DONE;
+ }
+ (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
+ ETH_ADDR_LEN);
+ f->mac_info.filter_type = mac_filter->filter_type;
+ TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
+ vsi->mac_num++;
+
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+
+ return ret;
+}
+
+int
+i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num;
+ enum rte_mac_filter_type filter_type;
+ int ret = I40E_SUCCESS;
+
+ /* Can't find it, return an error */
+ f = i40e_find_mac_filter(vsi, addr);
+ if (f == NULL)
+ return I40E_ERR_PARAM;
+
+ vlan_num = vsi->vlan_num;
+ filter_type = f->mac_info.filter_type;
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (vlan_num == 0) {
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+ return I40E_ERR_PARAM;
+ }
+ } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+ filter_type == RTE_MAC_HASH_MATCH)
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+	/* Remove the mac addr from the mac list */
+ TAILQ_REMOVE(&vsi->mac_list, f, next);
+ rte_free(f);
+ vsi->mac_num--;
+
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
+/* Configure hash enable flags for RSS */
+uint64_t
+i40e_config_hena(uint64_t flags)
+{
+ uint64_t hena = 0;
+
+ if (!flags)
+ return hena;
+
+ if (flags & ETH_RSS_FRAG_IPV4)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
+ if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ if (flags & ETH_RSS_FRAG_IPV6)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
+ if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ if (flags & ETH_RSS_L2_PAYLOAD)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
+
+ return hena;
+}
+
+/* Parse the hash enable flags */
+uint64_t
+i40e_parse_hena(uint64_t flags)
+{
+ uint64_t rss_hf = 0;
+
+ if (!flags)
+ return rss_hf;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
+ rss_hf |= ETH_RSS_FRAG_IPV4;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
+ rss_hf |= ETH_RSS_FRAG_IPV6;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+ rss_hf |= ETH_RSS_L2_PAYLOAD;
+
+ return rss_hf;
+}
+
+/* Disable RSS */
+static void
+i40e_pf_disable_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+ hena &= ~I40E_RSS_HENA_ALL;
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+}
+
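+/**
+ * Program the RSS hash key, either through the admin queue when the
+ * firmware supports it, or by writing the PFQF_HKEY registers directly.
+ */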
+static int
+i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret = 0;
+
+ if (!key || key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ struct i40e_aqc_get_set_rss_key_data *key_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key "
+ "via AQ");
+ } else {
+ uint32_t *hash_key = (uint32_t *)key;
+ uint16_t i;
+
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
+ I40E_WRITE_FLUSH(hw);
+ }
+
+ return ret;
+}
+
+static int
+i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!key || !key_len)
+ return -EINVAL;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
+ (struct i40e_aqc_get_set_rss_key_data *)key);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
+ return ret;
+ }
+ } else {
+ uint32_t *key_dw = (uint32_t *)key;
+ uint16_t i;
+
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+ }
+ *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+ return 0;
+}
+
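+/* Apply an RSS configuration: program the hash key and the HENA bits */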
+static int
+i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t rss_hf;
+ uint64_t hena;
+ int ret;
+
+ ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ if (ret)
+ return ret;
+
+ rss_hf = rss_conf->rss_hf;
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+ hena &= ~I40E_RSS_HENA_ALL;
+ hena |= i40e_config_hena(rss_hf);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+ if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -EINVAL;
+ return 0; /* Nothing to do */
+ }
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -EINVAL;
+
+ return i40e_hw_rss_hash_set(pf, rss_conf);
+}
+
+static int
+i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t hena;
+
+ i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
+ &rss_conf->rss_key_len);
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+ rss_conf->rss_hf = i40e_parse_hena(hena);
+
+ return 0;
+}
+
+static int
+i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
+{
+ switch (filter_type) {
+ case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+ break;
+ case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+ break;
+ case RTE_TUNNEL_FILTER_IMAC_TENID:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
+ break;
+ case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
+ break;
+ case ETH_TUNNEL_FILTER_IMAC:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+ break;
+ case ETH_TUNNEL_FILTER_OIP:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ break;
+ case ETH_TUNNEL_FILTER_IIP:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid tunnel filter type");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
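+/* Translate a tunnel filter request into an AQ cloud filter and apply it */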
+static int
+i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add)
+{
+ uint16_t ip_type;
+ uint32_t ipv4_addr;
+ uint8_t i, tun_type = 0;
+	/* internal variable to convert ipv6 byte order */
+ uint32_t convert_ipv6[4];
+ int val, ret = 0;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
+ struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+
+ cld_filter = rte_zmalloc("tunnel_filter",
+ sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
+ 0);
+
+ if (NULL == cld_filter) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -EINVAL;
+ }
+ pfilter = cld_filter;
+
+ ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac);
+
+ pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ rte_memcpy(&pfilter->ipaddr.v4.data,
+ &rte_cpu_to_le_32(ipv4_addr),
+ sizeof(pfilter->ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ for (i = 0; i < 4; i++) {
+ convert_ipv6[i] =
+ rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
+ }
+ rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
+ sizeof(pfilter->ipaddr.v6.data));
+ }
+
+ /* check tunneled type */
+ switch (tunnel_filter->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+ break;
+ case RTE_TUNNEL_TYPE_NVGRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+ break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+ break;
+ default:
+		/* Other tunnel types are not supported. */
+ PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+ &pfilter->flags);
+ if (val < 0) {
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ pfilter->flags |= rte_cpu_to_le_16(
+ I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+ ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+ pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ if (add)
+ ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ cld_filter, 1);
+
+ rte_free(cld_filter);
+ return ret;
+}
+
+static int
+i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
+{
+ uint8_t i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return -1;
+}
+
+static int
+i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+ int idx, ret;
+ uint8_t filter_idx;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx >= 0) {
+ PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
+ return -EINVAL;
+ }
+
+ /* Now check if there is space to add the new port */
+ idx = i40e_get_vxlan_port_idx(pf, 0);
+ if (idx < 0) {
+ PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
+ "not adding port %d", port);
+ return -ENOSPC;
+ }
+
+ ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_idx, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
+ port, filter_idx);
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[idx] = port;
+ pf->vxlan_bitmap |= (1 << idx);
+
+ if (!(pf->flags & I40E_FLAG_VXLAN))
+ pf->flags |= I40E_FLAG_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+ int idx;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ if (!(pf->flags & I40E_FLAG_VXLAN)) {
+ PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
+ return -EINVAL;
+ }
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ if (idx < 0) {
+ PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
+ return -EINVAL;
+ }
+
+ if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
+ port, idx);
+
+ pf->vxlan_ports[idx] = 0;
+ pf->vxlan_bitmap &= ~(1 << idx);
+
+ if (!pf->vxlan_bitmap)
+ pf->flags &= ~I40E_FLAG_VXLAN;
+
+ return 0;
+}
+
+/* Add UDP tunneling port */
+static int
+i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ int ret = 0;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
+ break;
+
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+ ret = -1;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ int ret = 0;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+ ret = -1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Calculate the maximum number of contiguous PF queues that are configured */
+static int
+i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ int i, num;
+ struct i40e_rx_queue *rxq;
+
+ num = 0;
+ for (i = 0; i < pf->lan_nb_qps; i++) {
+ rxq = data->rx_queues[i];
+ if (rxq && rxq->q_set)
+ num++;
+ else
+ break;
+ }
+
+ return num;
+}
+
+/* Configure RSS */
+static int
+i40e_pf_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+
+	/*
+	 * If both VMDQ and RSS are enabled, not all of the PF queues are
+	 * configured. It's necessary to calculate the actual number of PF
+	 * queues that are configured.
+	 */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
+ PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
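+	/* Pack four 8-bit queue indexes into each 32-bit HLUT register */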
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (j & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40e_hw_rss_hash_set(pf, &rss_conf);
+}
+
+static int
+i40e_tunnel_filter_param_check(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ if (pf == NULL || filter == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+
+ if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
+ return -EINVAL;
+ }
+
+ if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
+ (is_zero_ether_addr(&filter->outer_mac))) {
+ PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
+ return -EINVAL;
+ }
+
+ if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
+ (is_zero_ether_addr(&filter->inner_mac))) {
+ PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
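+/* Select a 3-byte or 4-byte GRE key by toggling the FVBM mask-enable bit */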
+static int
+i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
+{
+ uint32_t val, reg;
+ int ret = -EINVAL;
+
+ val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+
+ if (len == 3) {
+ reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
+ } else if (len == 4) {
+ reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
+ } else {
+ PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
+ return ret;
+ }
+
+ if (reg != val) {
+ ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
+ reg, NULL);
+ if (ret != 0)
+ return ret;
+ } else {
+ ret = 0;
+ }
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+ I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
+
+ return ret;
+}
+
+static int
+i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
+{
+ int ret = -EINVAL;
+
+ if (!hw || !cfg)
+ return -EINVAL;
+
+ switch (cfg->cfg_type) {
+ case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
+ ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = I40E_ERR_PARAM;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_dev_global_config_set(hw,
+ (struct rte_eth_global_cfg *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct rte_eth_tunnel_filter_conf *filter;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = I40E_SUCCESS;
+
+ filter = (struct rte_eth_tunnel_filter_conf *)(arg);
+
+ if (i40e_tunnel_filter_param_check(pf, filter) < 0)
+ return I40E_ERR_PARAM;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ if (!(pf->flags & I40E_FLAG_VXLAN))
+ ret = I40E_NOT_SUPPORTED;
+ break;
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = I40E_ERR_PARAM;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_pf_config_mq_rx(struct i40e_pf *pf)
+{
+ int ret = 0;
+ enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+
+ /* RSS setup */
+ if (mq_mode & ETH_MQ_RX_RSS_FLAG)
+ ret = i40e_pf_config_rss(pf);
+ else
+ i40e_pf_disable_rss(pf);
+
+ return ret;
+}
+
+/* Get the symmetric hash enable configurations per port */
+static void
+i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
+
+ *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
+}
+
+/* Set the symmetric hash enable configurations per port */
+static void
+i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
+
+ if (enable > 0) {
+ if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
+ PMD_DRV_LOG(INFO, "Symmetric hash has already "
+ "been enabled");
+ return;
+ }
+ reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+ } else {
+ if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
+ PMD_DRV_LOG(INFO, "Symmetric hash has already "
+ "been disabled");
+ return;
+ }
+ reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+ }
+ i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
+ I40E_WRITE_FLUSH(hw);
+}
+
+/*
+ * Get global configurations of hash function type and symmetric hash enable
+ * per flow type (pctype). Note that global configuration means it affects all
+ * the ports on the same NIC.
+ */
+static int
+i40e_get_hash_filter_global_config(struct i40e_hw *hw,
+ struct rte_eth_hash_global_conf *g_cfg)
+{
+ uint32_t reg, mask = I40E_FLOW_TYPES;
+ uint16_t i;
+ enum i40e_filter_pctype pctype;
+
+ memset(g_cfg, 0, sizeof(*g_cfg));
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK)
+ g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ else
+ g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ PMD_DRV_LOG(DEBUG, "Hash function is %s",
+ (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
+
+ for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
+ if (!(mask & (1UL << i)))
+ continue;
+ mask &= ~(1UL << i);
+		/* A set bit indicates the corresponding flow type is supported */
+ g_cfg->valid_bit_mask[0] |= (1UL << i);
+ /* if flowtype is invalid, continue */
+ if (!I40E_VALID_FLOW(i))
+ continue;
+ pctype = i40e_flowtype_to_pctype(i);
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
+ if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
+ g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
+ }
+
+ return 0;
+}
+
+static int
+i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
+{
+ uint32_t i;
+ uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
+
+ if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
+ g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
+ g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+ PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
+ g_cfg->hash_func);
+ return -EINVAL;
+ }
+
+	/*
+	 * As i40e supports fewer than 32 flow types, only the first 32 bits
+	 * need to be checked.
+	 */
+ mask0 = g_cfg->valid_bit_mask[0];
+ for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+ if (i == 0) {
+ /* Check if any unsupported flow type configured */
+ if ((mask0 | i40e_mask) ^ i40e_mask)
+ goto mask_err;
+ } else {
+ if (g_cfg->valid_bit_mask[i])
+ goto mask_err;
+ }
+ }
+
+ return 0;
+
+mask_err:
+ PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
+
+ return -EINVAL;
+}
+
+/*
+ * Set global configurations of hash function type and symmetric hash enable
+ * per flow type (pctype). Note any modifying global configuration will affect
+ * all the ports on the same NIC.
+ */
+static int
+i40e_set_hash_filter_global_config(struct i40e_hw *hw,
+ struct rte_eth_hash_global_conf *g_cfg)
+{
+ int ret;
+ uint16_t i;
+ uint32_t reg;
+ uint32_t mask0 = g_cfg->valid_bit_mask[0];
+ enum i40e_filter_pctype pctype;
+
+ /* Check the input parameters */
+ ret = i40e_hash_global_config_check(g_cfg);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; mask0 && i < UINT32_BIT; i++) {
+ if (!(mask0 & (1UL << i)))
+ continue;
+ mask0 &= ~(1UL << i);
+ /* if flowtype is invalid, continue */
+ if (!I40E_VALID_FLOW(i))
+ continue;
+ pctype = i40e_flowtype_to_pctype(i);
+ reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+ I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ }
+
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+ /* Toeplitz */
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
+ PMD_DRV_LOG(DEBUG, "Hash function already set to "
+ "Toeplitz");
+ goto out;
+ }
+ reg |= I40E_GLQF_CTL_HTOEP_MASK;
+ } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ /* Simple XOR */
+ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
+ PMD_DRV_LOG(DEBUG, "Hash function already set to "
+ "Simple XOR");
+ goto out;
+ }
+ reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+ } else
+ /* Use the default, and keep it as it is */
+ goto out;
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
+
+out:
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * Valid input sets for hash and flow director filters per PCTYPE
+ */
+static uint64_t
+i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter)
+{
+ uint64_t valid;
+
+ static const uint64_t valid_hash_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
+ I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
+ I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
+ I40E_INSET_FLEX_PAYLOAD,
+ };
+
+ /**
+ * Flow director supports only fields defined in
+ * union rte_eth_fdir_flow.
+ */
+ static const uint64_t valid_fdir_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
+ I40E_INSET_IPV4_TTL,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
+ I40E_INSET_IPV4_TTL,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_LAST_ETHER_TYPE,
+ };
+
+ if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
+ return 0;
+ if (filter == RTE_ETH_FILTER_HASH)
+ valid = valid_hash_inset_table[pctype];
+ else
+ valid = valid_fdir_inset_table[pctype];
+
+ return valid;
+}
+
+/**
+ * Validate if the input set is allowed for a specific PCTYPE
+ */
+static int
+i40e_validate_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter, uint64_t inset)
+{
+ uint64_t valid;
+
+ valid = i40e_get_valid_input_set(pctype, filter);
+ if (inset & (~valid))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* default input set fields combination per pctype */
+static uint64_t
+i40e_get_default_input_set(uint16_t pctype)
+{
+ static const uint64_t default_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_LAST_ETHER_TYPE,
+ };
+
+ if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
+ return 0;
+
+ return default_inset_table[pctype];
+}
+
+/**
+ * Parse the input set from index to logical bit masks
+ */
+static int
+i40e_parse_input_set(uint64_t *inset,
+ enum i40e_filter_pctype pctype,
+ enum rte_eth_input_set_field *field,
+ uint16_t size)
+{
+ uint16_t i, j;
+ int ret = -EINVAL;
+
+ static const struct {
+ enum rte_eth_input_set_field field;
+ uint64_t inset;
+ } inset_convert_table[] = {
+ {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
+ {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
+ {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
+ {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
+ {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
+ {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
+ {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
+ {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
+ {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
+ {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
+ {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
+ {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
+ {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
+ {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
+ {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
+ I40E_INSET_IPV6_NEXT_HDR},
+ {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
+ I40E_INSET_IPV6_HOP_LIMIT},
+ {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
+ I40E_INSET_SCTP_VT},
+ {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
+ I40E_INSET_TUNNEL_DMAC},
+ {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
+ I40E_INSET_VLAN_TUNNEL},
+ {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
+ I40E_INSET_TUNNEL_ID},
+ {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W1},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W2},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W3},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W4},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W5},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W6},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W7},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W8},
+ };
+
+ if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
+ return ret;
+
+	/* Only one item is allowed when it is DEFAULT or NONE */
+ if (size == 1) {
+ if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
+ *inset = i40e_get_default_input_set(pctype);
+ return 0;
+ } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
+ *inset = I40E_INSET_NONE;
+ return 0;
+ }
+ }
+
+ for (i = 0, *inset = 0; i < size; i++) {
+ for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
+ if (field[i] == inset_convert_table[j].field) {
+ *inset |= inset_convert_table[j].inset;
+ break;
+ }
+ }
+
+		/* Unsupported input set field, return immediately */
+ if (j == RTE_DIM(inset_convert_table))
+ return ret;
+ }
+
+ return 0;
+}
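+
+/*
+ * Usage sketch (editor's illustration): the field array parsed above is
+ * filled in by the application through the generic filter API, e.g. to hash
+ * IPv4/UDP flows on the source/destination addresses only (hypothetical
+ * port id 0):
+ *
+ *	struct rte_eth_hash_filter_info info;
+ *
+ *	memset(&info, 0, sizeof(info));
+ *	info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
+ *	info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ *	info.info.input_set_conf.inset_size = 2;
+ *	info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
+ *	info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
+ *	info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
+ *				RTE_ETH_FILTER_SET, &info);
+ */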
+
+/**
+ * Translate the input set from logical bit masks to register-aware bit masks
+ */
+static uint64_t
+i40e_translate_input_set_reg(uint64_t input)
+{
+ uint64_t val = 0;
+ uint16_t i;
+
+ static const struct {
+ uint64_t inset;
+ uint64_t inset_reg;
+ } inset_map[] = {
+ {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
+ {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
+ {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
+ {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
+ {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
+ {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
+ {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
+ {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
+ {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
+ {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
+ {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
+ {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
+ {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
+ {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
+ {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
+ {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
+ {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
+ {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
+ {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
+ {I40E_INSET_TUNNEL_DMAC,
+ I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
+ {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
+ {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
+ {I40E_INSET_TUNNEL_SRC_PORT,
+ I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
+ {I40E_INSET_TUNNEL_DST_PORT,
+ I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
+ {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
+ {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
+ {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
+ {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
+ {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
+ {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
+ {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
+ {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
+ {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
+ };
+
+ if (input == 0)
+ return val;
+
+ /* Translate input set to register aware inset */
+ for (i = 0; i < RTE_DIM(inset_map); i++) {
+ if (input & inset_map[i].inset)
+ val |= inset_map[i].inset_reg;
+ }
+
+ return val;
+}
+
+static int
+i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
+{
+ uint8_t i, idx = 0;
+ uint64_t inset_need_mask = inset;
+
+ static const struct {
+ uint64_t inset;
+ uint32_t mask;
+ } inset_mask_map[] = {
+ {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
+ {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
+ {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
+ {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
+ {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
+ {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
+ {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
+ {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
+ };
+
+ if (!inset || !mask || !nb_elem)
+ return 0;
+
+ for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
+ /* Clear the inset bit, if no MASK is required,
+ * for example proto + ttl
+ */
+ if ((inset & inset_mask_map[i].inset) ==
+ inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
+ inset_need_mask &= ~inset_mask_map[i].inset;
+ if (!inset_need_mask)
+ return 0;
+ }
+ for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
+ if ((inset_need_mask & inset_mask_map[i].inset) ==
+ inset_mask_map[i].inset) {
+ if (idx >= nb_elem) {
+ PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
+ return -EINVAL;
+ }
+ mask[idx] = inset_mask_map[i].mask;
+ idx++;
+ }
+ }
+
+ return idx;
+}
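+
+/*
+ * Worked example (editor's note): with
+ * inset = I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
+ * the first pass above finds PROTO and TTL requested together, which needs
+ * no field mask, and clears them from inset_need_mask, leaving only
+ * I40E_INSET_IPV4_TOS. The second pass then emits
+ * mask[0] = I40E_INSET_IPV4_TOS_MASK and the function returns 1.
+ */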
+
+static void
+i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, addr);
+
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
+ if (reg != val)
+ i40e_write_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+}
+
+static void
+i40e_filter_input_set_init(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set, inset_reg;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int num, i;
+
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
+ if (!I40E_VALID_PCTYPE(pctype))
+ continue;
+ input_set = i40e_get_default_input_set(pctype);
+
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return;
+ inset_reg = i40e_translate_input_set_reg(input_set);
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++) {
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ }
+		/* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ /* store the default input set */
+ pf->hash_input_set[pctype] = input_set;
+ pf->fdir.input_set[pctype] = input_set;
+ }
+}
+
+int
+i40e_hash_filter_inset_select(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf)
+{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set, inset_reg = 0;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int ret, i, num;
+
+ if (!conf) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+ if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
+ conf->op != RTE_ETH_INPUT_SET_ADD) {
+ PMD_DRV_LOG(ERR, "Unsupported input set operation");
+ return -EINVAL;
+ }
+
+ if (!I40E_VALID_FLOW(conf->flow_type)) {
+ PMD_DRV_LOG(ERR, "invalid flow_type input.");
+ return -EINVAL;
+ }
+ pctype = i40e_flowtype_to_pctype(conf->flow_type);
+ ret = i40e_parse_input_set(&input_set, pctype, conf->field,
+ conf->inset_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to parse input set");
+ return -EINVAL;
+ }
+ if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH,
+ input_set) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid input set");
+ return -EINVAL;
+ }
+ if (conf->op == RTE_ETH_INPUT_SET_ADD) {
+ /* get inset value in register */
+ inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
+ inset_reg <<= I40E_32_BIT_WIDTH;
+ inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
+ input_set |= pf->hash_input_set[pctype];
+ }
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return -EINVAL;
+
+ inset_reg |= i40e_translate_input_set_reg(input_set);
+
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+	/* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ I40E_WRITE_FLUSH(hw);
+
+ pf->hash_input_set[pctype] = input_set;
+ return 0;
+}
+
+int
+i40e_fdir_filter_inset_select(struct i40e_pf *pf,
+ struct rte_eth_input_set_conf *conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set, inset_reg = 0;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int ret, i, num;
+
+ if (!hw || !conf) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+ if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
+ conf->op != RTE_ETH_INPUT_SET_ADD) {
+ PMD_DRV_LOG(ERR, "Unsupported input set operation");
+ return -EINVAL;
+ }
+
+ if (!I40E_VALID_FLOW(conf->flow_type)) {
+ PMD_DRV_LOG(ERR, "invalid flow_type input.");
+ return -EINVAL;
+ }
+ pctype = i40e_flowtype_to_pctype(conf->flow_type);
+ ret = i40e_parse_input_set(&input_set, pctype, conf->field,
+ conf->inset_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to parse input set");
+ return -EINVAL;
+ }
+ if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
+ input_set) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid input set");
+ return -EINVAL;
+ }
+
+ /* get inset value in register */
+ inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
+ inset_reg <<= I40E_32_BIT_WIDTH;
+ inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
+
+	/* The inset reg for flex payload cannot be changed for fdir;
+	 * it is set by writing I40E_PRTQF_FD_FLXINSET
+ * in i40e_set_flex_mask_on_pctype.
+ */
+ if (conf->op == RTE_ETH_INPUT_SET_SELECT)
+ inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
+ else
+ input_set |= pf->fdir.input_set[pctype];
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return -EINVAL;
+
+ inset_reg |= i40e_translate_input_set_reg(input_set);
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+	/* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ I40E_WRITE_FLUSH(hw);
+
+ pf->fdir.input_set[pctype] = input_set;
+ return 0;
+}
+
+static int
+i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+ int ret = 0;
+
+ if (!hw || !info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ i40e_get_symmetric_hash_enable_per_port(hw,
+ &(info->info.enable));
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ ret = i40e_get_hash_filter_global_config(hw,
+ &(info->info.global_conf));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+ info->info_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+ int ret = 0;
+
+ if (!hw || !info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ ret = i40e_set_hash_filter_global_config(hw,
+ &(info->info.global_conf));
+ break;
+ case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+ ret = i40e_hash_filter_inset_select(hw,
+ &(info->info.input_set_conf));
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+ info->info_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Operations for hash function */
+static int
+i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = i40e_hash_filter_get(hw,
+ (struct rte_eth_hash_filter_info *)arg);
+ break;
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_hash_filter_set(hw,
+ (struct rte_eth_hash_filter_info *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
+ filter_op);
+ ret = -ENOTSUP;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Configure an ethertype filter, which can direct packets by filtering
+ * on MAC address and ether_type, or on ether_type alone
+ */
+static int
+i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret;
+
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " control packet filter.", filter->ether_type);
+ return -EINVAL;
+ }
+ if (filter->ether_type == ETHER_TYPE_VLAN)
+ PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
+ " not supported.");
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->mac_addr.addr_bytes,
+ filter->ether_type, flags,
+ pf->main_vsi->seid,
+ filter->queue, add, &stats, NULL);
+
+ PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u\n",
+ ret, stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+ if (ret < 0)
+ return -ENOSYS;
+ return 0;
+}
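+
+/*
+ * Usage sketch (editor's illustration): an application adds such a filter
+ * through the generic filter API, e.g. to steer LLDP frames (ether type
+ * 0x88cc) to queue 1 regardless of MAC address (hypothetical port id 0):
+ *
+ *	struct rte_eth_ethertype_filter filter;
+ *
+ *	memset(&filter, 0, sizeof(filter));
+ *	filter.ether_type = 0x88cc;
+ *	filter.flags = 0;	/* no RTE_ETHTYPE_FLAGS_MAC: ignore MAC */
+ *	filter.queue = 1;
+ *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_ETHERTYPE,
+ *				RTE_ETH_FILTER_ADD, &filter);
+ */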
+
+/*
+ * Handle operations for ethertype filter.
+ */
+static int
+i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return ret;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_ethertype_filter_set(pf,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_ethertype_filter_set(pf,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
+
+static int
+i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NONE:
+ /* For global configuration */
+ ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_MACVLAN:
+ ret = i40e_mac_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Check and enable Extended Tag.
+ * Enabling Extended Tag is important for 40G performance.
+ */
+static void
+i40e_enable_extended_tag(struct rte_eth_dev *dev)
+{
+ uint32_t buf = 0;
+ int ret;
+
+ ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ PCI_DEV_CAP_REG);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
+ PCI_DEV_CAP_REG);
+ return;
+ }
+ if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
+ PMD_DRV_LOG(ERR, "Does not support Extended Tag");
+ return;
+ }
+
+ buf = 0;
+ ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ PCI_DEV_CTRL_REG);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
+ PCI_DEV_CTRL_REG);
+ return;
+ }
+ if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
+ PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
+ return;
+ }
+ buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
+ ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
+ PCI_DEV_CTRL_REG);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
+ PCI_DEV_CTRL_REG);
+ return;
+ }
+}
+
+/*
+ * As some registers are reset only by a global hardware reset,
+ * hardware initialization is needed to put those registers into an
+ * expected initial state.
+ */
+static void
+i40e_hw_init(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_enable_extended_tag(dev);
+
+ /* clear the PF Queue Filter control register */
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
+
+ /* Disable symmetric hash per port */
+ i40e_set_symmetric_hash_enable_per_port(hw, 0);
+}
+
+enum i40e_filter_pctype
+i40e_flowtype_to_pctype(uint16_t flow_type)
+{
+ static const enum i40e_filter_pctype pctype_table[] = {
+ [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
+ };
+
+ return pctype_table[flow_type];
+}
+
+uint16_t
+i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
+{
+ static const uint16_t flowtype_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
+ };
+
+ return flowtype_table[pctype];
+}
+
+/*
+ * On X710, performance falls far short of expectations on recent firmware
+ * versions. The same holds on XL710 if promiscuous mode is disabled, or if
+ * promiscuous mode is enabled and the port MAC address equals the packet
+ * destination MAC address. The fix for this issue may not be integrated in
+ * the following firmware version, so a workaround in the software driver is
+ * needed: it modifies the initial values of 3 internal-only registers on
+ * both X710 and XL710. Note that the values for X710 and XL710 could
+ * differ, and the workaround can be removed once it is fixed in firmware.
+ */
+
+/* For both X710 and XL710 */
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+
+#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
+#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
+
+/* For X710 */
+#define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
+/* For XL710 */
+#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
+#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+
+static void
+i40e_configure_registers(struct i40e_hw *hw)
+{
+ static struct {
+ uint32_t addr;
+ uint64_t val;
+ } reg_table[] = {
+ {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
+ {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
+ {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
+ };
+ uint64_t reg;
+ uint32_t i;
+ int ret;
+
+ for (i = 0; i < RTE_DIM(reg_table); i++) {
+ if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
+ if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
+ reg_table[i].val =
+ I40E_GL_SWR_PM_UP_THR_SF_VALUE;
+ else /* For X710 */
+ reg_table[i].val =
+ I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+ }
+
+ ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
+ &reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
+ reg_table[i].addr);
+ break;
+ }
+ PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
+ reg_table[i].addr, reg);
+ if (reg == reg_table[i].val)
+ continue;
+
+ ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
+ reg_table[i].val, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
+ "address of 0x%"PRIx32, reg_table[i].val,
+ reg_table[i].addr);
+ break;
+ }
+ PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
+ "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
+ }
+}
+
+#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
+#define I40E_VSI_TSR_QINQ_CONFIG 0xc030
+#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
+#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
+static int
+i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
+{
+ uint32_t reg;
+ int ret;
+
+ if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
+ PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
+ return -EINVAL;
+ }
+
+ /* Configure for double VLAN RX stripping */
+ reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
+ if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
+ reg |= I40E_VSI_TSR_QINQ_CONFIG;
+ ret = i40e_aq_debug_write_register(hw,
+ I40E_VSI_TSR(vsi->vsi_id),
+ reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
+ vsi->vsi_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ /* Configure for double VLAN TX insertion */
+ reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
+ if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
+ reg = I40E_VSI_L2TAGSTXVALID_QINQ;
+ ret = i40e_aq_debug_write_register(hw,
+ I40E_VSI_L2TAGSTXVALID(
+ vsi->vsi_id), reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to update "
+ "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ return 0;
+}
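+
+/*
+ * Editor's note: the QinQ setup above underpins the extended-VLAN offload
+ * that applications may request through the generic VLAN offload API, e.g.
+ * (hypothetical port id 0):
+ *
+ *	rte_eth_dev_set_vlan_offload(0, ETH_VLAN_EXTEND_OFFLOAD);
+ */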
+
+/**
+ * i40e_aq_add_mirror_rule
+ * @hw: pointer to the hardware structure
+ * @seid: VEB seid to add mirror rule to
+ * @dst_id: destination VSI seid
+ * @rule_type: type of the mirror rule to be added
+ * @entries: buffer which contains the entities to be mirrored
+ * @count: number of entities contained in the buffer
+ * @rule_id: the rule_id of the rule to be added
+ *
+ * Add a mirror rule for a given VEB.
+ *
+ **/
+static enum i40e_status_code
+i40e_aq_add_mirror_rule(struct i40e_hw *hw,
+ uint16_t seid, uint16_t dst_id,
+ uint16_t rule_type, uint16_t *entries,
+ uint16_t count, uint16_t *rule_id)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule cmd;
+ struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ (struct i40e_aqc_add_delete_mirror_rule_completion *)
+ &desc.params.raw;
+ uint16_t buff_len;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_mirror_rule);
+ memset(&cmd, 0, sizeof(cmd));
+
+ buff_len = sizeof(uint16_t) * count;
+ desc.datalen = rte_cpu_to_le_16(buff_len);
+ if (buff_len > 0)
+ desc.flags |= rte_cpu_to_le_16(
+ (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd.rule_type = rte_cpu_to_le_16(rule_type <<
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ cmd.num_entries = rte_cpu_to_le_16(count);
+ cmd.seid = rte_cpu_to_le_16(seid);
+ cmd.destination = rte_cpu_to_le_16(dst_id);
+
+ rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
+ status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
+	PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
+			" rule_id = %u,"
+			" mirror_rules_used = %u, mirror_rules_free = %u",
+ hw->aq.asq_last_status, resp->rule_id,
+ resp->mirror_rules_used, resp->mirror_rules_free);
+ *rule_id = rte_le_to_cpu_16(resp->rule_id);
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_mirror_rule
+ * @hw: pointer to the hardware structure
+ * @seid: VEB seid to delete the mirror rule from
+ * @rule_type: type of the mirror rule to be deleted
+ * @entries: buffer which contains the entities to be mirrored
+ * @count: number of entities contained in the buffer
+ * @rule_id: the rule_id of the rule to be deleted
+ *
+ * Delete a mirror rule for a given VEB.
+ *
+ **/
+static enum i40e_status_code
+i40e_aq_del_mirror_rule(struct i40e_hw *hw,
+ uint16_t seid, uint16_t rule_type, uint16_t *entries,
+ uint16_t count, uint16_t rule_id)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule cmd;
+ uint16_t buff_len = 0;
+ enum i40e_status_code status;
+ void *buff = NULL;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_delete_mirror_rule);
+ memset(&cmd, 0, sizeof(cmd));
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+ desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ cmd.num_entries = count;
+ buff_len = sizeof(uint16_t) * count;
+ desc.datalen = rte_cpu_to_le_16(buff_len);
+ buff = (void *)entries;
+ } else
+ /* rule id is filled in destination field for deleting mirror rule */
+ cmd.destination = rte_cpu_to_le_16(rule_id);
+
+ cmd.rule_type = rte_cpu_to_le_16(rule_type <<
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ cmd.seid = rte_cpu_to_le_16(seid);
+
+ rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
+ status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_mirror_rule_set
+ * @dev: pointer to the rte_eth_dev structure
+ * @mirror_conf: mirror rule info
+ * @sw_id: mirror rule's sw_id
+ * @on: enable/disable
+ *
+ * set a mirror rule.
+ *
+ **/
+static int
+i40e_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t sw_id, uint8_t on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_mirror_rule *it, *mirr_rule = NULL;
+ struct i40e_mirror_rule *parent = NULL;
+ uint16_t seid, dst_seid, rule_id;
+ uint16_t i, j = 0;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
+
+ if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
+ PMD_DRV_LOG(ERR, "mirror rule can not be configured"
+ " without veb or vfs.");
+ return -ENOSYS;
+ }
+ if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
+ PMD_DRV_LOG(ERR, "mirror table is full.");
+ return -ENOSPC;
+ }
+ if (mirror_conf->dst_pool > pf->vf_num) {
+ PMD_DRV_LOG(ERR, "invalid destination pool %u.",
+ mirror_conf->dst_pool);
+ return -EINVAL;
+ }
+
+ seid = pf->main_vsi->veb->seid;
+
+ TAILQ_FOREACH(it, &pf->mirror_list, rules) {
+ if (sw_id <= it->index) {
+ mirr_rule = it;
+ break;
+ }
+ parent = it;
+ }
+ if (mirr_rule && sw_id == mirr_rule->index) {
+ if (on) {
+ PMD_DRV_LOG(ERR, "mirror rule exists.");
+ return -EEXIST;
+ } else {
+ ret = i40e_aq_del_mirror_rule(hw, seid,
+ mirr_rule->rule_type,
+ mirr_rule->entries,
+ mirr_rule->num_entries, mirr_rule->id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
+ " ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
+ rte_free(mirr_rule);
+ pf->nb_mirror_rule--;
+ return 0;
+ }
+ } else if (!on) {
+ PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
+ return -ENOENT;
+ }
+
+ mirr_rule = rte_zmalloc("i40e_mirror_rule",
+			sizeof(struct i40e_mirror_rule), 0);
+ if (!mirr_rule) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+ switch (mirror_conf->rule_type) {
+ case ETH_MIRROR_VLAN:
+ for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+ mirr_rule->entries[j] =
+ mirror_conf->vlan.vlan_id[i];
+ j++;
+ }
+ }
+ if (j == 0) {
+ PMD_DRV_LOG(ERR, "vlan is not specified.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
+ break;
+ case ETH_MIRROR_VIRTUAL_POOL_UP:
+ case ETH_MIRROR_VIRTUAL_POOL_DOWN:
+ /* check if the specified pool bit is out of range */
+ if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
+ PMD_DRV_LOG(ERR, "pool mask is out of range.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+ for (i = 0, j = 0; i < pf->vf_num; i++) {
+ if (mirror_conf->pool_mask & (1ULL << i)) {
+ mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
+ j++;
+ }
+ }
+ if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
+ /* add pf vsi to entries */
+ mirr_rule->entries[j] = pf->main_vsi_seid;
+ j++;
+ }
+ if (j == 0) {
+ PMD_DRV_LOG(ERR, "pool is not specified.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+	/* egress and ingress in aq commands mean from the switch, not the port */
+ mirr_rule->rule_type =
+ (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
+ I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
+ I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
+ break;
+ case ETH_MIRROR_UPLINK_PORT:
+		/* egress and ingress in aq commands mean from the switch, not the port */
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
+ break;
+ case ETH_MIRROR_DOWNLINK_PORT:
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
+ mirror_conf->rule_type);
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+
+ /* If the dst_pool is equal to vf_num, consider it as PF */
+ if (mirror_conf->dst_pool == pf->vf_num)
+ dst_seid = pf->main_vsi_seid;
+ else
+ dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
+
+ ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
+ mirr_rule->rule_type, mirr_rule->entries,
+ j, &rule_id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to add mirror rule:"
+ " ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ rte_free(mirr_rule);
+ return -ENOSYS;
+ }
+
+ mirr_rule->index = sw_id;
+ mirr_rule->num_entries = j;
+ mirr_rule->id = rule_id;
+ mirr_rule->dst_vsi_seid = dst_seid;
+
+ if (parent)
+ TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
+ else
+ TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
+
+ pf->nb_mirror_rule++;
+ return 0;
+}
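+
+/*
+ * Usage sketch (editor's illustration): assuming the port has active VFs
+ * (so a VEB exists), an application can mirror VLAN 100 traffic into VF
+ * pool 0 through the generic ethdev API; port id 0 and rule id 0 are
+ * hypothetical values.
+ *
+ *	struct rte_eth_mirror_conf conf;
+ *
+ *	memset(&conf, 0, sizeof(conf));
+ *	conf.rule_type = ETH_MIRROR_VLAN;
+ *	conf.vlan.vlan_mask = 1ULL << 0;
+ *	conf.vlan.vlan_id[0] = 100;
+ *	conf.dst_pool = 0;
+ *	rte_eth_mirror_rule_set(0, &conf, 0, 1 /* on */);
+ */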
+
+/**
+ * i40e_mirror_rule_reset
+ * @dev: pointer to the device
+ * @sw_id: mirror rule's sw_id
+ *
+ * reset a mirror rule.
+ *
+ **/
+static int
+i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_mirror_rule *it, *mirr_rule = NULL;
+ uint16_t seid;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
+
+ seid = pf->main_vsi->veb->seid;
+
+ TAILQ_FOREACH(it, &pf->mirror_list, rules) {
+ if (sw_id == it->index) {
+ mirr_rule = it;
+ break;
+ }
+ }
+ if (mirr_rule) {
+ ret = i40e_aq_del_mirror_rule(hw, seid,
+ mirr_rule->rule_type,
+ mirr_rule->entries,
+ mirr_rule->num_entries, mirr_rule->id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
+ " status = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
+ rte_free(mirr_rule);
+ pf->nb_mirror_rule--;
+ } else {
+ PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
+ return -ENOENT;
+ }
+ return 0;
+}
+
+static uint64_t
+i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t systim_cycles;
+
+ systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
+ systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
+ << 32;
+
+ return systim_cycles;
+}
+
+static uint64_t
+i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_tstamp;
+
+ rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
+ rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
+ << 32;
+
+ return rx_tstamp;
+}
+
+static uint64_t
+i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t tx_tstamp;
+
+ tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
+ tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
+ << 32;
+
+ return tx_tstamp;
+}
+
+static void
+i40e_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+ struct rte_eth_link link;
+ uint32_t tsync_inc_l;
+ uint32_t tsync_inc_h;
+
+ /* Get current link speed. */
+ memset(&link, 0, sizeof(link));
+ i40e_dev_link_update(dev, 1);
+ rte_i40e_dev_atomic_read_link_status(dev, &link);
+
+ switch (link.link_speed) {
+ case ETH_SPEED_NUM_40G:
+ tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
+ break;
+ case ETH_SPEED_NUM_10G:
+ tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
+ break;
+ case ETH_SPEED_NUM_1G:
+ tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
+ break;
+ default:
+ tsync_inc_l = 0x0;
+ tsync_inc_h = 0x0;
+ }
+
+ /* Set the timesync increment value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
+
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->systime_tc.cc_shift = 0;
+ adapter->systime_tc.nsec_mask = 0;
+
+ adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->rx_tstamp_tc.cc_shift = 0;
+ adapter->rx_tstamp_tc.nsec_mask = 0;
+
+ adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->tx_tstamp_tc.cc_shift = 0;
+ adapter->tx_tstamp_tc.nsec_mask = 0;
+}
+
+static int
+i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ ns = rte_timespec_to_ns(ts);
+
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ systime_cycles = i40e_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+i40e_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl_l;
+ uint32_t tsync_ctl_h;
+
+ /* Stop the timesync system time. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
+ /* Reset the timesync system time value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
+
+ i40e_start_timecounters(dev);
+
+ /* Clear timesync registers. */
+ I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
+ I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+ /* Enable timestamping of PTP packets. */
+ tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
+ tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
+
+ tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
+ tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
+ tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
+
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
+
+ return 0;
+}
+
+static int
+i40e_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl_l;
+ uint32_t tsync_ctl_h;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
+ tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
+
+ tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
+ tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
+
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
+
+ /* Reset the timesync increment value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp, uint32_t flags)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ uint32_t sync_status;
+ uint32_t index = flags & 0x03;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
+ if ((sync_status & (1 << index)) == 0)
+ return -EINVAL;
+
+ rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ uint32_t sync_status;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
+ if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
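+
+/*
+ * Usage sketch (editor's illustration): the timesync callbacks above are
+ * exercised through the ethdev IEEE 1588 API. A minimal RX timestamp read,
+ * assuming a PTP packet was received on hypothetical port 0 with timesync
+ * index 0, might look like:
+ *
+ *	struct timespec ts;
+ *
+ *	rte_eth_timesync_enable(0);
+ *	... receive a packet carrying PKT_RX_IEEE1588_PTP ...
+ *	if (rte_eth_timesync_read_rx_timestamp(0, &ts, 0) == 0)
+ *		printf("RX tstamp: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
+ */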
+
+/*
+ * i40e_parse_dcb_configure - parse the DCB configuration from the user
+ * @dev: the device being configured
+ * @dcb_cfg: pointer to the parsed result
+ * @*tc_map: bit map of enabled traffic classes
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int
+i40e_parse_dcb_configure(struct rte_eth_dev *dev,
+ struct i40e_dcbx_config *dcb_cfg,
+ uint8_t *tc_map)
+{
+ struct rte_eth_dcb_rx_conf *dcb_rx_conf;
+ uint8_t i, tc_bw, bw_lf;
+
+ memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
+
+ dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
+ PMD_INIT_LOG(ERR, "number of tc exceeds max.");
+ return -EINVAL;
+ }
+
+ /* assume each tc has the same bw */
+ tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
+ for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
+ dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
+ /* to ensure the sum of tcbw is equal to 100 */
+ bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
+ for (i = 0; i < bw_lf; i++)
+ dcb_cfg->etscfg.tcbwtable[i]++;
+
+ /* assume each tc has the same Transmission Selection Algorithm */
+ for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
+ dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcb_cfg->etscfg.prioritytable[i] =
+ dcb_rx_conf->dcb_tc[i];
+
+ /* FW needs one App to configure HW */
+ dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
+ dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
+ dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ if (dcb_rx_conf->nb_tcs == 0)
+ *tc_map = 1; /* tc0 only */
+ else
+ *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
+
+ if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ dcb_cfg->pfc.willing = 0;
+ dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->pfc.pfcenable = *tc_map;
+ }
+ return 0;
+}
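+
+/*
+ * Usage sketch (editor's illustration): the dcb_rx_conf parsed above comes
+ * from the port configuration. A 4-TC setup with user priorities mapped
+ * round-robin could be requested as follows (port id and queue counts are
+ * hypothetical):
+ *
+ *	struct rte_eth_conf port_conf;
+ *	int i;
+ *
+ *	memset(&port_conf, 0, sizeof(port_conf));
+ *	port_conf.rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
+ *	port_conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
+ *	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ *		port_conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % 4;
+ *	rte_eth_dev_configure(0, 8, 8, &port_conf);
+ */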
+
+
+static enum i40e_status_code
+i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
+ struct i40e_aqc_vsi_properties_data *info,
+ uint8_t enabled_tcmap)
+{
+ enum i40e_status_code ret;
+ int i, total_tc = 0;
+ uint16_t qpnum_per_tc, bsf, qp_idx;
+ struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ uint16_t used_queues;
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tcmap & (1 << i))
+ total_tc++;
+ }
+ if (total_tc == 0)
+ total_tc = 1;
+ vsi->enabled_tc = enabled_tcmap;
+
+	/* Different VSI types have different numbers of queues assigned */
+ if (vsi->type == I40E_VSI_MAIN)
+ used_queues = dev_data->nb_rx_queues -
+ pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ else if (vsi->type == I40E_VSI_VMDQ2)
+ used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ else {
+ PMD_INIT_LOG(ERR, "unsupported VSI type.");
+ return I40E_ERR_NO_AVAILABLE_VSI;
+ }
+
+ qpnum_per_tc = used_queues / total_tc;
+ /* Number of queues per enabled TC */
+ if (qpnum_per_tc == 0) {
+		PMD_INIT_LOG(ERR, "number of queues is less than the number of TCs.");
+ return I40E_ERR_INVALID_QP_ID;
+ }
+ qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
+ I40E_MAX_Q_PER_TC);
+ bsf = rte_bsf32(qpnum_per_tc);
+
+ /**
+	 * Configure TC and queue mapping parameters. For each enabled TC,
+	 * allocate qpnum_per_tc queues to that traffic class; for each
+	 * disabled TC, the default queue serves its traffic.
+ */
+ qp_idx = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ qp_idx += qpnum_per_tc;
+ } else
+ info->tc_mapping[i] = 0;
+ }
+
+	/* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->nb_qps; i++)
+ info->queue_mapping[i] =
+ rte_cpu_to_le_16(vsi->base_queue + i);
+ } else {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ }
+ info->valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ return I40E_SUCCESS;
+}
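+
+/*
+ * Worked example (editor's note): for a main VSI with 16 usable queues and
+ * 4 enabled TCs, qpnum_per_tc = 4 and bsf = rte_bsf32(4) = 2, so each
+ * enabled TC i is mapped as
+ * tc_mapping[i] = (qp_idx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ *		   (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ * with qp_idx stepping through 0, 4, 8 and 12.
+ */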
+
+/*
+ * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
+ * @veb: VEB to be configured
+ * @tc_map: enabled TC bitmap
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
+{
+ struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
+ struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
+ struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
+ enum i40e_status_code ret = I40E_SUCCESS;
+ int i;
+ uint32_t bw_max;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (veb->enabled_tc == tc_map)
+ return ret;
+
+ /* configure tc bandwidth */
+ memset(&veb_bw, 0, sizeof(veb_bw));
+ veb_bw.tc_valid_bits = tc_map;
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (tc_map & BIT_ULL(i))
+ veb_bw.tc_bw_share_credits[i] = 1;
+ }
+ ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
+ &veb_bw, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
+ " per TC failed = %d",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+
+ memset(&ets_query, 0, sizeof(ets_query));
+ ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
+ &ets_query, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
+ " configuration %u", hw->aq.asq_last_status);
+ return ret;
+ }
+ memset(&bw_query, 0, sizeof(bw_query));
+ ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
+ &bw_query, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
+ " configuration %u", hw->aq.asq_last_status);
+ return ret;
+ }
+
+ /* store and print out BW info */
+ veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
+ veb->bw_info.bw_max = ets_query.tc_bw_max;
+ PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
+ PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
+ bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
+ (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
+ I40E_16_BIT_WIDTH);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ veb->bw_info.bw_ets_share_credits[i] =
+ bw_query.tc_bw_share_credits[i];
+ veb->bw_info.bw_ets_credits[i] =
+ rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
+ /* 4 bits per TC, 4th bit is reserved */
+ veb->bw_info.bw_ets_max[i] =
+ (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
+ RTE_LEN2MASK(3, uint8_t));
+ PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
+ veb->bw_info.bw_ets_share_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
+ veb->bw_info.bw_ets_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
+ veb->bw_info.bw_ets_max[i]);
+ }
+
+ veb->enabled_tc = tc_map;
+
+ return ret;
+}
+
+
+/*
+ * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
+ * @vsi: VSI to be configured
+ * @tc_map: enabled TC bitmap
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ enum i40e_status_code ret = I40E_SUCCESS;
+ int i;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (vsi->enabled_tc == tc_map)
+ return ret;
+
+ /* configure tc bandwidth */
+ memset(&bw_data, 0, sizeof(bw_data));
+ bw_data.tc_valid_bits = tc_map;
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (tc_map & BIT_ULL(i))
+ bw_data.tc_bw_credits[i] = 1;
+ }
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
+ " per TC failed = %d",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ i40e_get_cap(hw);
+ ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
+ if (ret)
+ goto out;
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure "
+ "TC queue mapping = %d",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+ /* update the local VSI info with updated queue map */
+ (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ (void)rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ /* query and update current VSI BW information */
+ ret = i40e_vsi_get_bw_config(vsi);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "Failed updating vsi bw info, err %s aq_err %s",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+
+ vsi->enabled_tc = tc_map;
+
+out:
+ return ret;
+}
+
+/*
+ * i40e_dcb_hw_configure - program the dcb setting to hw
+ * @pf: pf the configuration is taken on
+ * @new_cfg: new configuration
+ * @tc_map: enabled TC bitmap
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_dcb_hw_configure(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg,
+ uint8_t tc_map)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_vsi_list *vsi_list;
+ enum i40e_status_code ret;
+ int i;
+ uint32_t val;
+
+	/* Use the FW API only if FW >= v4.4 */
+ if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
+ (hw->aq.fw_maj_ver >= 5))) {
+		PMD_INIT_LOG(ERR, "FW < v4.4, cannot use FW LLDP API"
+				  " to configure DCB");
+ return I40E_ERR_FIRMWARE_API_VERSION;
+ }
+
+ /* Check if need reconfiguration */
+ if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
+		PMD_INIT_LOG(ERR, "No change in DCB config required.");
+ return I40E_SUCCESS;
+ }
+
+ /* Copy the new config to the current config */
+ *old_cfg = *new_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "Set DCB Config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ /* set receive Arbiter to RR mode and ETS scheme by default */
+ for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
+ val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
+		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
+			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
+			 I40E_PRTDCB_RETSTCC_ETSTC_MASK);
+ val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
+ I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
+ val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
+ val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
+ }
+ /* get local mib to check whether it is configured correctly */
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+
+	/* if a VEB has been created, update its TC settings first */
+ if (main_vsi->veb) {
+ ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "Failed configuring TC for VEB seid=%d\n",
+ main_vsi->veb->seid);
+ }
+ /* Update each VSI */
+ i40e_vsi_config_tc(main_vsi, tc_map);
+ if (main_vsi->veb) {
+ TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
+			/* Besides the main VSI and VMDq VSIs, enable only
+			 * the default TC for other VSIs
+			 */
+ if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
+ ret = i40e_vsi_config_tc(vsi_list->vsi,
+ tc_map);
+ else
+ ret = i40e_vsi_config_tc(vsi_list->vsi,
+ I40E_DEFAULT_TCMAP);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "Failed configuring TC for VSI seid=%d\n",
+ vsi_list->vsi->seid);
+ /* continue */
+ }
+ }
+ return I40E_SUCCESS;
+}
+
+/*
+ * i40e_dcb_init_configure - initial dcb config
+ * @dev: device being configured
+ * @sw_dcb: indicate whether dcb is sw configured or hw offload
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int
+i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+
+ if ((pf->flags & I40E_FLAG_DCB) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support DCB");
+ return -ENOTSUP;
+ }
+
+ /* DCB initialization:
+ * Update DCB configuration from the Firmware and configure
+ * LLDP MIB change event.
+ */
+ if (sw_dcb == TRUE) {
+ ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
+
+ ret = i40e_init_dcb(hw);
+ /* if sw_dcb, lldp agent is stopped, the return from
+ * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
+ * adminq status.
+ */
+ if (ret != I40E_SUCCESS &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ memset(&hw->local_dcbx_config, 0,
+ sizeof(struct i40e_dcbx_config));
+ /* set dcb default configuration */
+ hw->local_dcbx_config.etscfg.willing = 0;
+ hw->local_dcbx_config.etscfg.maxtcs = 0;
+ hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
+ hw->local_dcbx_config.etscfg.tsatable[0] =
+ I40E_IEEE_TSA_ETS;
+ hw->local_dcbx_config.etsrec =
+ hw->local_dcbx_config.etscfg;
+ hw->local_dcbx_config.pfc.willing = 0;
+ hw->local_dcbx_config.pfc.pfccap =
+ I40E_MAX_TRAFFIC_CLASS;
+ /* FW needs one App to configure HW */
+ hw->local_dcbx_config.numapps = 1;
+ hw->local_dcbx_config.app[0].selector =
+ I40E_APP_SEL_ETHTYPE;
+ hw->local_dcbx_config.app[0].priority = 3;
+ hw->local_dcbx_config.app[0].protocolid =
+ I40E_APP_PROTOID_FCOE;
+ ret = i40e_set_dcb_config(hw);
+ if (ret) {
+				PMD_INIT_LOG(ERR, "default dcb config failed."
+ " err = %d, aq_err = %d.", ret,
+ hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
+ " aq_err = %d.", ret,
+ hw->aq.asq_last_status);
+ return -ENOTSUP;
+ }
+ } else {
+ ret = i40e_aq_start_lldp(hw, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to start lldp");
+
+ ret = i40e_init_dcb(hw);
+ if (!ret) {
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ PMD_INIT_LOG(ERR, "HW doesn't support"
+ " DCBX offload.");
+ return -ENOTSUP;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
+ " aq_err = %d.", ret,
+ hw->aq.asq_last_status);
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
+
+/*
+ * i40e_dcb_setup - setup dcb related config
+ * @dev: device being configured
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int
+i40e_dcb_setup(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_dcbx_config dcb_cfg;
+ uint8_t tc_map = 0;
+ int ret = 0;
+
+ if ((pf->flags & I40E_FLAG_DCB) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support DCB");
+ return -ENOTSUP;
+ }
+
+ if (pf->vf_num != 0)
+		PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");
+
+ ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "invalid dcb config");
+ return -EINVAL;
+ }
+ ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "dcb sw configure fails");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
+ uint16_t bsf, tc_mapping;
+ int i, j = 0;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
+ else
+ dcb_info->nb_tcs = 1;
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
+ for (i = 0; i < dcb_info->nb_tcs; i++)
+ dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
+
+ /* get queue mapping if vmdq is disabled */
+ if (!pf->nb_cfg_vmdq_vsi) {
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ dcb_info->tc_queue.tc_rxq[j][i].base =
+ (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ dcb_info->tc_queue.tc_txq[j][i].base =
+ dcb_info->tc_queue.tc_rxq[j][i].base;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+ dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
+ dcb_info->tc_queue.tc_txq[j][i].nb_queue =
+ dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
+ }
+ return 0;
+ }
+
+ /* get queue mapping if vmdq is enabled */
+ do {
+ vsi = pf->vmdq[j].vsi;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ dcb_info->tc_queue.tc_rxq[j][i].base =
+ (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ dcb_info->tc_queue.tc_txq[j][i].base =
+ dcb_info->tc_queue.tc_rxq[j][i].base;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+ dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
+ dcb_info->tc_queue.tc_txq[j][i].nb_queue =
+ dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
+ }
+ j++;
+ } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+ return 0;
+}
+
+static int
+i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t interval =
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ uint16_t msix_intr;
+
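+	/* Vector I40E_MISC_VEC_ID is shared with the admin queue and is
+	 * controlled through DYN_CTL0; RX queue vectors start at
+	 * I40E_RX_VEC_START and use the DYN_CTLN register array.
+	 */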
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ else
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_DYN_CTLN(msix_intr -
+ I40E_RX_VEC_START),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+
+ I40E_WRITE_FLUSH(hw);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ else
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_DYN_CTLN(msix_intr -
+ I40E_RX_VEC_START),
+ 0);
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int i40e_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *ptr_data = regs->data;
+ uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
+ const struct i40e_reg_info *reg_info;
+
+ if (ptr_data == NULL) {
+ regs->length = I40E_GLGEN_STAT_CLEAR + 4;
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* The first few registers have to be read using AQ operations */
+ reg_idx = 0;
+ while (i40e_regs_adminq[reg_idx].name) {
+ reg_info = &i40e_regs_adminq[reg_idx++];
+ for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
+ for (arr_idx2 = 0;
+ arr_idx2 <= reg_info->count2;
+ arr_idx2++) {
+ reg_offset = arr_idx * reg_info->stride1 +
+ arr_idx2 * reg_info->stride2;
+ reg_offset += reg_info->base_addr;
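+				/* store each value at its dword offset */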
+ ptr_data[reg_offset >> 2] =
+ i40e_read_rx_ctl(hw, reg_offset);
+ }
+ }
+
+ /* The remaining registers can be read using primitives */
+ reg_idx = 0;
+ while (i40e_regs_others[reg_idx].name) {
+ reg_info = &i40e_regs_others[reg_idx++];
+ for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
+ for (arr_idx2 = 0;
+ arr_idx2 <= reg_info->count2;
+ arr_idx2++) {
+ reg_offset = arr_idx * reg_info->stride1 +
+ arr_idx2 * reg_info->stride2;
+ reg_offset += reg_info->base_addr;
+ ptr_data[reg_offset >> 2] =
+ I40E_READ_REG(hw, reg_offset);
+ }
+ }
+
+ return 0;
+}
+
+static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Convert word count to byte count */
+ return hw->nvm.sr_size << 1;
+}
+
+static int i40e_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t *data = eeprom->data;
+ uint16_t offset, length, cnt_words;
+ int ret_code;
+
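+	/* The shadow RAM is word-addressed; convert byte offset/length to words */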
+ offset = eeprom->offset >> 1;
+ length = eeprom->length >> 1;
+ cnt_words = length;
+
+ if (offset > hw->nvm.sr_size ||
+ offset + length > hw->nvm.sr_size) {
+ PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
+ return -EINVAL;
+ }
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
+ if (ret_code != I40E_SUCCESS || cnt_words != length) {
+ PMD_DRV_LOG(ERR, "EEPROM read failed.");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return;
+ }
+
+ /* Flags: 0x3 updates port address */
+ i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
+}
+
+static int
+i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN
+ + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
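+	/* e.g. MTU 1500 gives frame_size 1522: 14B header + 4B CRC + 4B VLAN */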
+ int ret = 0;
+
+ /* check if mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ return -EINVAL;
+
+	/* changing the MTU is forbidden while the port is started */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR,
+ "port %d must be stopped before configuration\n",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev_data->dev_conf.rxmode.jumbo_frame = 0;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.h b/src/dpdk/drivers/net/i40e/i40e_ethdev.h
new file mode 100644
index 00000000..92c8fad0
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.h
@@ -0,0 +1,715 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_ETHDEV_H_
+#define _I40E_ETHDEV_H_
+
+#include <rte_eth_ctrl.h>
+#include <rte_time.h>
+#include <rte_kvargs.h>
+
+#define I40E_VLAN_TAG_SIZE 4
+
+#define I40E_AQ_LEN 32
+#define I40E_AQ_BUF_SZ 4096
+/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64 */
+#define I40E_MAX_Q_PER_TC 64
+#define I40E_NUM_DESC_DEFAULT 512
+#define I40E_NUM_DESC_ALIGN 32
+#define I40E_BUF_SIZE_MIN 1024
+#define I40E_FRAME_SIZE_MAX 9728
+#define I40E_QUEUE_BASE_ADDR_UNIT 128
+/* number of VSIs and queue default setting */
+#define I40E_MAX_QP_NUM_PER_VF 16
+#define I40E_DEFAULT_QP_NUM_FDIR 1
+#define I40E_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
+#define I40E_VFTA_SIZE (4096 / I40E_UINT32_BIT_SIZE)
+/* Maximum number of MAC addresses */
+#define I40E_NUM_MACADDR_MAX 64
+/* Maximum number of VFs */
+#define I40E_MAX_VF 128
+
+/*
+ * vlan_id is a 12 bit number.
+ * The VFTA array is actually a 4096 bit array, i.e. 128 32-bit elements.
+ * The lower 5 bits (2^5 = 32) select the bit within a 32-bit element;
+ * the upper 7 bits select the VFTA array index.
+ */
+#define I40E_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F))
+#define I40E_VFTA_IDX(vlan_id) ((vlan_id) >> 5)
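+/* Example: vlan_id 100 maps to array index 3 (100 >> 5), bit 4 (100 & 0x1F) */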
+
+/* Default TC traffic in case DCB is not enabled */
+#define I40E_DEFAULT_TCMAP 0x1
+#define I40E_FDIR_QUEUE_ID 0
+
+/* Always assign pool 0 to main VSI, VMDQ will start from 1 */
+#define I40E_VMDQ_POOL_BASE 1
+
+#define I40E_DEFAULT_RX_FREE_THRESH 32
+#define I40E_DEFAULT_RX_PTHRESH 8
+#define I40E_DEFAULT_RX_HTHRESH 8
+#define I40E_DEFAULT_RX_WTHRESH 0
+
+#define I40E_DEFAULT_TX_FREE_THRESH 32
+#define I40E_DEFAULT_TX_PTHRESH 32
+#define I40E_DEFAULT_TX_HTHRESH 0
+#define I40E_DEFAULT_TX_WTHRESH 0
+#define I40E_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define I40E_4_BIT_WIDTH (CHAR_BIT / 2)
+#define I40E_4_BIT_MASK RTE_LEN2MASK(I40E_4_BIT_WIDTH, uint8_t)
+#define I40E_8_BIT_WIDTH CHAR_BIT
+#define I40E_8_BIT_MASK UINT8_MAX
+#define I40E_16_BIT_WIDTH (CHAR_BIT * 2)
+#define I40E_16_BIT_MASK UINT16_MAX
+#define I40E_32_BIT_WIDTH (CHAR_BIT * 4)
+#define I40E_32_BIT_MASK UINT32_MAX
+#define I40E_48_BIT_WIDTH (CHAR_BIT * 6)
+#define I40E_48_BIT_MASK RTE_LEN2MASK(I40E_48_BIT_WIDTH, uint64_t)
+
+/* Linux PF host with virtchnl version 1.1 */
+#define PF_IS_V11(vf) \
+ (((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \
+ ((vf)->version_minor == 1))
+
+/* index flex payload per layer */
+enum i40e_flxpld_layer_idx {
+ I40E_FLXPLD_L2_IDX = 0,
+ I40E_FLXPLD_L3_IDX = 1,
+ I40E_FLXPLD_L4_IDX = 2,
+ I40E_MAX_FLXPLD_LAYER = 3,
+};
+#define I40E_MAX_FLXPLD_FIED 3 /* max number of flex payload fields */
+#define I40E_FDIR_BITMASK_NUM_WORD 2 /* max number of bitmask words */
+#define I40E_FDIR_MAX_FLEXWORD_NUM 8 /* max number of flexpayload words */
+#define I40E_FDIR_MAX_FLEX_LEN 16 /* len in bytes of flex payload */
+#define I40E_INSET_MASK_NUM_REG 2 /* number of input set mask registers */
+
+/* i40e flags */
+#define I40E_FLAG_RSS (1ULL << 0)
+#define I40E_FLAG_DCB (1ULL << 1)
+#define I40E_FLAG_VMDQ (1ULL << 2)
+#define I40E_FLAG_SRIOV (1ULL << 3)
+#define I40E_FLAG_HEADER_SPLIT_DISABLED (1ULL << 4)
+#define I40E_FLAG_HEADER_SPLIT_ENABLED (1ULL << 5)
+#define I40E_FLAG_FDIR (1ULL << 6)
+#define I40E_FLAG_VXLAN (1ULL << 7)
+#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8)
+#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
+ I40E_FLAG_DCB | \
+ I40E_FLAG_VMDQ | \
+ I40E_FLAG_SRIOV | \
+ I40E_FLAG_HEADER_SPLIT_DISABLED | \
+ I40E_FLAG_HEADER_SPLIT_ENABLED | \
+ I40E_FLAG_FDIR | \
+ I40E_FLAG_VXLAN | \
+ I40E_FLAG_RSS_AQ_CAPABLE)
+
+#define I40E_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV4_OTHER | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_L2_PAYLOAD)
+
+/* All bits of RSS hash enable */
+#define I40E_RSS_HENA_ALL ( \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define I40E_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+/* Default queue interrupt throttling time in microseconds */
+#define I40E_ITR_INDEX_DEFAULT 0
+#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
+/* Special FW support this floating VEB feature */
+#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
+#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+
+struct i40e_adapter;
+
+/**
+ * MAC filter structure
+ */
+struct i40e_mac_filter_info {
+ enum rte_mac_filter_type filter_type;
+ struct ether_addr mac_addr;
+};
+
+TAILQ_HEAD(i40e_mac_filter_list, i40e_mac_filter);
+
+/* MAC filter list structure */
+struct i40e_mac_filter {
+ TAILQ_ENTRY(i40e_mac_filter) next;
+ struct i40e_mac_filter_info mac_info;
+};
+
+TAILQ_HEAD(i40e_vsi_list_head, i40e_vsi_list);
+
+struct i40e_vsi;
+
+/* VSI list structure */
+struct i40e_vsi_list {
+ TAILQ_ENTRY(i40e_vsi_list) list;
+ struct i40e_vsi *vsi;
+};
+
+struct i40e_rx_queue;
+struct i40e_tx_queue;
+
+/* Bandwidth limit information */
+struct i40e_bw_info {
+ uint16_t bw_limit; /* BW Limit (0 = disabled) */
+ uint8_t bw_max; /* Max BW limit if enabled */
+
+ /* Relative credits within same TC with respect to other VSIs or Comps */
+ uint8_t bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* Bandwidth limit per TC */
+ uint8_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* Max bandwidth limit per TC */
+ uint8_t bw_ets_max[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* Structure that defines a VEB */
+struct i40e_veb {
+ struct i40e_vsi_list_head head;
+	struct i40e_vsi *associate_vsi; /* Associated VSI that owns the VEB */
+	struct i40e_pf *associate_pf; /* Associated PF that owns the VEB */
+ uint16_t seid; /* The seid of VEB itself */
+ uint16_t uplink_seid; /* The uplink seid of this VEB */
+ uint16_t stats_idx;
+ struct i40e_eth_stats stats;
+ uint8_t enabled_tc; /* The traffic class enabled */
+ struct i40e_bw_info bw_info; /* VEB bandwidth information */
+};
+
+/* i40e MACVLAN filter structure */
+struct i40e_macvlan_filter {
+ struct ether_addr macaddr;
+ enum rte_mac_filter_type filter_type;
+ uint16_t vlan_id;
+};
+
+/*
+ * Structure that defines a VSI, associated with an adapter.
+ */
+struct i40e_vsi {
+ struct i40e_adapter *adapter; /* Backreference to associated adapter */
+ struct i40e_aqc_vsi_properties_data info; /* VSI properties */
+
+ struct i40e_eth_stats eth_stats_offset;
+ struct i40e_eth_stats eth_stats;
+	/*
+	 * When the driver is loaded, only a default main VSI exists. When a
+	 * new VSI is added, HW needs to know how the VSIs are organized.
+	 * Moreover, a VSI is an element that cannot switch packets by itself,
+	 * so a new VEB component must be added to perform switching. A new
+	 * VSI therefore has to specify its uplink VSI (parent VSI) before it
+	 * is created. The uplink VSI checks whether it already has a VEB to
+	 * switch packets; if not, it tries to create one. The uplink VSI then
+	 * moves the new VSI into its sib_vsi_list to manage all downlink VSIs.
+	 *  sib_vsi_list: the VSI list that shares the same uplink VSI.
+	 *  parent_vsi  : the uplink VSI. It's NULL for the main VSI.
+	 *  veb         : the VEB associated with the VSI.
+	 */
+ struct i40e_vsi_list sib_vsi_list; /* sibling vsi list */
+ struct i40e_vsi *parent_vsi;
+ struct i40e_veb *veb; /* Associated veb, could be null */
+ struct i40e_veb *floating_veb; /* Associated floating veb */
+ bool offset_loaded;
+ enum i40e_vsi_type type; /* VSI types */
+ uint16_t vlan_num; /* Total VLAN number */
+ uint16_t mac_num; /* Total mac number */
+ uint32_t vfta[I40E_VFTA_SIZE]; /* VLAN bitmap */
+ struct i40e_mac_filter_list mac_list; /* macvlan filter list */
+ /* specific VSI-defined parameters, SRIOV stored the vf_id */
+ uint32_t user_param;
+ uint16_t seid; /* The seid of VSI itself */
+ uint16_t uplink_seid; /* The uplink seid of this VSI */
+ uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
+ uint16_t nb_used_qps; /* Number of queue pairs VSI uses */
+ uint16_t max_macaddrs; /* Maximum number of MAC addresses */
+ uint16_t base_queue; /* The first queue index of this VSI */
+ /*
+ * The offset to visit VSI related register, assigned by HW when
+ * creating VSI
+ */
+ uint16_t vsi_id;
+	uint16_t msix_intr; /* The MSIX interrupt bound to the VSI */
+	uint16_t nb_msix;   /* The max number of MSIX vectors */
+ uint8_t enabled_tc; /* The traffic class enabled */
+ struct i40e_bw_info bw_info; /* VSI bandwidth information */
+};
+
+struct pool_entry {
+ LIST_ENTRY(pool_entry) next;
+ uint16_t base;
+ uint16_t len;
+};
+
+LIST_HEAD(res_list, pool_entry);
+
+struct i40e_res_pool_info {
+ uint32_t base; /* Resource start index */
+ uint32_t num_alloc; /* Allocated resource number */
+ uint32_t num_free; /* Total available resource number */
+ struct res_list alloc_list; /* Allocated resource list */
+ struct res_list free_list; /* Available resource list */
+};
+
+enum I40E_VF_STATE {
+ I40E_VF_INACTIVE = 0,
+ I40E_VF_INRESET,
+ I40E_VF_ININIT,
+ I40E_VF_ACTIVE,
+};
+
+/*
+ * Structure to store private data for PF host.
+ */
+struct i40e_pf_vf {
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+	enum I40E_VF_STATE state; /* Current state of the VF */
+ uint16_t vf_idx; /* VF index in pf->vfs */
+ uint16_t lan_nb_qps; /* Actual queues allocated */
+ uint16_t reset_cnt; /* Total vf reset times */
+ struct ether_addr mac_addr; /* Default MAC address */
+};
+
+/*
+ * Structure to store private data for flow control.
+ */
+struct i40e_fc_conf {
+ uint16_t pause_time; /* Flow control pause timer */
+ /* FC high water 0-7 for pfc and 8 for lfc unit:kilobytes */
+ uint32_t high_water[I40E_MAX_TRAFFIC_CLASS + 1];
+ /* FC low water 0-7 for pfc and 8 for lfc unit:kilobytes */
+ uint32_t low_water[I40E_MAX_TRAFFIC_CLASS + 1];
+};
+
+/*
+ * Structure to store private data for VMDQ instance
+ */
+struct i40e_vmdq_info {
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+};
+
+/*
+ * Structure to store flex pit for flow director.
+ */
+struct i40e_fdir_flex_pit {
+ uint8_t src_offset; /* offset in words from the beginning of payload */
+ uint8_t size; /* size in words */
+ uint8_t dst_offset; /* offset in words of flexible payload */
+};
+
+struct i40e_fdir_flex_mask {
+ uint8_t word_mask; /**< Bit i enables word i of flexible payload */
+ struct {
+ uint8_t offset;
+ uint16_t mask;
+ } bitmask[I40E_FDIR_BITMASK_NUM_WORD];
+};
+
+#define I40E_FILTER_PCTYPE_MAX 64
+/*
+ * A structure used to define fields of a FDIR related info.
+ */
+struct i40e_fdir_info {
+ struct i40e_vsi *fdir_vsi; /* pointer to fdir VSI structure */
+	uint16_t match_counter_index; /* Statistic counter index used for fdir */
+ struct i40e_tx_queue *txq;
+ struct i40e_rx_queue *rxq;
+ void *prg_pkt; /* memory for fdir program packet */
+	uint64_t dma_addr; /* physical address of the packet memory */
+ /* input set bits for each pctype */
+ uint64_t input_set[I40E_FILTER_PCTYPE_MAX];
+	/*
+	 * the rule for how the byte stream is extracted as flexible payload;
+	 * the setting for each payload layer can have up to three elements
+	 */
+ struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
+ struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+};
+
+#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
+#define I40E_MAX_MIRROR_RULES 64
+/*
+ * Mirror rule structure
+ */
+struct i40e_mirror_rule {
+ TAILQ_ENTRY(i40e_mirror_rule) rules;
+ uint8_t rule_type;
+ uint16_t index; /* the sw index of mirror rule */
+ uint16_t id; /* the rule id assigned by firmware */
+ uint16_t dst_vsi_seid; /* destination vsi for this mirror rule. */
+ uint16_t num_entries;
+	/* The stored info depends on the rule type:
+	 * if type is I40E_MIRROR_TYPE_VLAN, VLAN IDs are stored here;
+	 * if type is I40E_MIRROR_TYPE_VPORT_*, VSI SEIDs are stored.
+	 */
+ uint16_t entries[I40E_MIRROR_MAX_ENTRIES_PER_RULE];
+};
+
+TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
+
+/*
+ * Structure to store private data specific for PF instance.
+ */
+struct i40e_pf {
+	struct i40e_adapter *adapter; /* The adapter this PF is associated with */
+ struct i40e_vsi *main_vsi; /* pointer to main VSI structure */
+ uint16_t mac_seid; /* The seid of the MAC of this PF */
+ uint16_t main_vsi_seid; /* The seid of the main VSI */
+ uint16_t max_num_vsi;
+ struct i40e_res_pool_info qp_pool; /*Queue pair pool */
+ struct i40e_res_pool_info msix_pool; /* MSIX interrupt pool */
+
+ struct i40e_hw_port_stats stats_offset;
+ struct i40e_hw_port_stats stats;
+ bool offset_loaded;
+
+ struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
+ struct ether_addr dev_addr; /* PF device mac address */
+ uint64_t flags; /* PF feature flags */
+ /* All kinds of queue pair setting for different VSIs */
+ struct i40e_pf_vf *vfs;
+ uint16_t vf_num;
+	/* Each of the queue pair counts below should be a power of 2, since
+	   that is a precondition once the TC configuration is applied */
+ uint16_t lan_nb_qp_max;
+ uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+ uint16_t lan_qp_offset;
+ uint16_t vmdq_nb_qp_max;
+ uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
+ uint16_t vmdq_qp_offset;
+ uint16_t vf_nb_qp_max;
+ uint16_t vf_nb_qps; /* The number of queue pairs of VF */
+ uint16_t vf_qp_offset;
+ uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+ uint16_t fdir_qp_offset;
+
+ uint16_t hash_lut_size; /* The size of hash lookup table */
+ /* input set bits for each pctype */
+ uint64_t hash_input_set[I40E_FILTER_PCTYPE_MAX];
+ /* store VXLAN UDP ports */
+ uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ uint16_t vxlan_bitmap; /* Vxlan bit mask */
+
+ /* VMDQ related info */
+ uint16_t max_nb_vmdq_vsi; /* Max number of VMDQ VSIs supported */
+ uint16_t nb_cfg_vmdq_vsi; /* number of VMDQ VSIs configured */
+ struct i40e_vmdq_info *vmdq;
+
+ struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_fc_conf fc_conf; /* Flow control conf */
+ struct i40e_mirror_rule_list mirror_list;
+ uint16_t nb_mirror_rule; /* The number of mirror rules */
+ bool floating_veb; /* The flag to use the floating VEB */
+ /* The floating enable flag for the specific VF */
+ bool floating_veb_list[I40E_MAX_VF];
+};
+
+enum pending_msg {
+ PFMSG_LINK_CHANGE = 0x1,
+ PFMSG_RESET_IMPENDING = 0x2,
+ PFMSG_DRIVER_CLOSE = 0x4,
+};
+
+struct i40e_vsi_vlan_pvid_info {
+ uint16_t on; /* Enable or disable pvid */
+ union {
+		uint16_t pvid; /* Valid when 'on' is set; the PVID to program */
+ struct {
+ /* Valid in case 'on' is cleared. 'tagged' will reject tagged packets,
+ * while 'untagged' will reject untagged packets.
+ */
+ uint8_t tagged;
+ uint8_t untagged;
+ } reject;
+ } config;
+};
+
+struct i40e_vf_rx_queues {
+ uint64_t rx_dma_addr;
+ uint32_t rx_ring_len;
+ uint32_t buff_size;
+};
+
+struct i40e_vf_tx_queues {
+ uint64_t tx_dma_addr;
+ uint32_t tx_ring_len;
+};
+
+/*
+ * Structure to store private data specific for VF instance.
+ */
+struct i40e_vf {
+	struct i40e_adapter *adapter; /* The adapter this VF is associated with */
+ struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
+ uint16_t num_queue_pairs;
+ uint16_t max_pkt_len; /* Maximum packet length */
+ bool promisc_unicast_enabled;
+ bool promisc_multicast_enabled;
+
+ uint32_t version_major; /* Major version number */
+ uint32_t version_minor; /* Minor version number */
+ uint16_t promisc_flags; /* Promiscuous setting */
+ uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */
+
+ /* Event from pf */
+ bool dev_closed;
+ bool link_up;
+ enum i40e_aq_link_speed link_speed;
+ bool vf_reset;
+ volatile uint32_t pend_cmd; /* pending command not finished yet */
+ uint32_t cmd_retval; /* return value of the cmd response from PF */
+	u16 pend_msg; /* flags indicating PF events not yet handled */
+ uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+
+ /* VSI info */
+ struct i40e_virtchnl_vf_resource *vf_res; /* All VSIs */
+ struct i40e_virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+ struct i40e_vsi vsi;
+ uint64_t flags;
+};
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+struct i40e_adapter {
+ /* Common for both PF and VF */
+ struct i40e_hw hw;
+ struct rte_eth_dev *eth_dev;
+
+ /* Specific for PF or VF */
+ union {
+ struct i40e_pf pf;
+ struct i40e_vf vf;
+ };
+
+ /* For vector PMD */
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
+
+ /* For PTP */
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
+};
+
+int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
+ enum i40e_vsi_type type,
+ struct i40e_vsi *uplink_vsi,
+ uint16_t user_param);
+int i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on);
+int i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan);
+int i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan);
+int i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *filter);
+int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr);
+void i40e_update_vsi_stats(struct i40e_vsi *vsi);
+void i40e_pf_disable_irq0(struct i40e_hw *hw);
+void i40e_pf_enable_irq0(struct i40e_hw *hw);
+int i40e_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi);
+void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi);
+int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
+ struct i40e_vsi_vlan_pvid_info *info);
+int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
+int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on);
+uint64_t i40e_config_hena(uint64_t flags);
+uint64_t i40e_parse_hena(uint64_t flags);
+enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf);
+enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf);
+int i40e_fdir_setup(struct i40e_pf *pf);
+const struct rte_memzone *i40e_memzone_reserve(const char *name,
+ uint32_t len,
+ int socket_id);
+int i40e_fdir_configure(struct rte_eth_dev *dev);
+void i40e_fdir_teardown(struct i40e_pf *pf);
+enum i40e_filter_pctype i40e_flowtype_to_pctype(uint16_t flow_type);
+uint16_t i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype);
+int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+int i40e_select_filter_input_set(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf,
+ enum rte_filter_type filter);
+int i40e_hash_filter_inset_select(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf);
+int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
+ struct rte_eth_input_set_conf *conf);
+
+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+/* I40E_DEV_PRIVATE_TO */
+#define I40E_DEV_PRIVATE_TO_PF(adapter) \
+ (&((struct i40e_adapter *)adapter)->pf)
+#define I40E_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct i40e_adapter *)adapter)->hw)
+#define I40E_DEV_PRIVATE_TO_ADAPTER(adapter) \
+ ((struct i40e_adapter *)adapter)
+
+/* I40EVF_DEV_PRIVATE_TO */
+#define I40EVF_DEV_PRIVATE_TO_VF(adapter) \
+ (&((struct i40e_adapter *)adapter)->vf)
+
+static inline struct i40e_vsi *
+i40e_get_vsi_from_adapter(struct i40e_adapter *adapter)
+{
+ struct i40e_hw *hw;
+
+ if (!adapter)
+ return NULL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(adapter);
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(adapter);
+ return &vf->vsi;
+ } else {
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(adapter);
+ return pf->main_vsi;
+ }
+}
+#define I40E_DEV_PRIVATE_TO_MAIN_VSI(adapter) \
+ i40e_get_vsi_from_adapter((struct i40e_adapter *)adapter)
+
+/* I40E_VSI_TO */
+#define I40E_VSI_TO_HW(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->hw))
+#define I40E_VSI_TO_PF(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->pf))
+#define I40E_VSI_TO_VF(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->vf))
+#define I40E_VSI_TO_DEV_DATA(vsi) \
+ (((struct i40e_vsi *)vsi)->adapter->pf.dev_data)
+#define I40E_VSI_TO_ETH_DEV(vsi) \
+ (((struct i40e_vsi *)vsi)->adapter->eth_dev)
+
+/* I40E_PF_TO */
+#define I40E_PF_TO_HW(pf) \
+ (&(((struct i40e_pf *)pf)->adapter->hw))
+#define I40E_PF_TO_ADAPTER(pf) \
+ ((struct i40e_adapter *)pf->adapter)
+
+/* I40E_VF_TO */
+#define I40E_VF_TO_HW(vf) \
+ (&(((struct i40e_vf *)vf)->adapter->hw))
+
+static inline void
+i40e_init_adminq_parameter(struct i40e_hw *hw)
+{
+ hw->aq.num_arq_entries = I40E_AQ_LEN;
+ hw->aq.num_asq_entries = I40E_AQ_LEN;
+ hw->aq.arq_buf_size = I40E_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = I40E_AQ_BUF_SZ;
+}
+
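+/* Round n down to the nearest power of two, e.g. 10 becomes 8; 0 stays 0 */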
+static inline int
+i40e_align_floor(int n)
+{
+ if (n == 0)
+ return 0;
+ return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
+}
+
+static inline uint16_t
+i40e_calc_itr_interval(int16_t interval)
+{
+ if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+
+ /* Convert to hardware count, as writing each 1 represents 2 us */
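+	/* e.g. an interval of 32 us yields a hardware count of 16 */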
+ return interval / 2;
+}
+
+#define I40E_VALID_FLOW(flow_type) \
+ ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER || \
+ (flow_type) == RTE_ETH_FLOW_FRAG_IPV6 || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER || \
+ (flow_type) == RTE_ETH_FLOW_L2_PAYLOAD)
+
+#define I40E_VALID_PCTYPE(pctype) \
+ ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+
+#endif /* _I40E_ETHDEV_H_ */
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c b/src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c
new file mode 100644
index 00000000..a616ae0b
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c
@@ -0,0 +1,2678 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
+
+#include "i40e_rxtx.h"
+#include "i40e_ethdev.h"
+#include "i40e_pf.h"
+#define I40EVF_VSI_DEFAULT_MSIX_INTR 1
+#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+#define MAX_RESET_WAIT_CNT 20
+
+struct i40evf_arq_msg_info {
+ enum i40e_virtchnl_ops ops;
+ enum i40e_status_code result;
+ uint16_t buf_len;
+ uint16_t msg_len;
+ uint8_t *msg;
+};
+
+struct vf_cmd_info {
+ enum i40e_virtchnl_ops ops;
+ uint8_t *in_args;
+ uint32_t in_args_size;
+ uint8_t *out_buffer;
+	/* Input & output: pass in the buffer size and receive the
+	 * actual result length
+	 */
+ uint32_t out_size;
+};
+
+enum i40evf_aq_result {
+	I40EVF_MSG_ERR = -1, /* Error when accessing the admin queue */
+ I40EVF_MSG_NON, /* Read nothing from admin queue */
+ I40EVF_MSG_SYS, /* Read system msg from admin queue */
+ I40EVF_MSG_CMD, /* Read async command result */
+};
+
+static int i40evf_dev_configure(struct rte_eth_dev *dev);
+static int i40evf_dev_start(struct rte_eth_dev *dev);
+static void i40evf_dev_stop(struct rte_eth_dev *dev);
+static void i40evf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int i40evf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
+static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
+static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
+ int on);
+static void i40evf_dev_close(struct rte_eth_dev *dev);
+static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40evf_get_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link);
+static int i40evf_init_vlan(struct rte_eth_dev *dev);
+static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+static void i40evf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ uint32_t pool);
+static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40evf_config_rss(struct i40e_vf *vf);
+static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
+ uint8_t *msg,
+ uint16_t msglen);
+
+/* Default hash key buffer for RSS */
+static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
+
+struct rte_i40evf_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
+ {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
+ {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
+ {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
+ {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
+ {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
+ {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
+ rx_unknown_protocol)},
+ {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
+	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
+	{"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
+};
+
+#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
+ sizeof(rte_i40evf_stats_strings[0]))
+
+static const struct eth_dev_ops i40evf_eth_dev_ops = {
+ .dev_configure = i40evf_dev_configure,
+ .dev_start = i40evf_dev_start,
+ .dev_stop = i40evf_dev_stop,
+ .promiscuous_enable = i40evf_dev_promiscuous_enable,
+ .promiscuous_disable = i40evf_dev_promiscuous_disable,
+ .allmulticast_enable = i40evf_dev_allmulticast_enable,
+ .allmulticast_disable = i40evf_dev_allmulticast_disable,
+ .link_update = i40evf_dev_link_update,
+ .stats_get = i40evf_dev_stats_get,
+ .xstats_get = i40evf_dev_xstats_get,
+ .xstats_get_names = i40evf_dev_xstats_get_names,
+ .xstats_reset = i40evf_dev_xstats_reset,
+ .dev_close = i40evf_dev_close,
+ .dev_infos_get = i40evf_dev_info_get,
+ .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
+ .vlan_filter_set = i40evf_vlan_filter_set,
+ .vlan_offload_set = i40evf_vlan_offload_set,
+ .vlan_pvid_set = i40evf_vlan_pvid_set,
+ .rx_queue_start = i40evf_dev_rx_queue_start,
+ .rx_queue_stop = i40evf_dev_rx_queue_stop,
+ .tx_queue_start = i40evf_dev_tx_queue_start,
+ .tx_queue_stop = i40evf_dev_tx_queue_stop,
+ .rx_queue_setup = i40e_dev_rx_queue_setup,
+ .rx_queue_release = i40e_dev_rx_queue_release,
+ .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
+ .rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .tx_queue_setup = i40e_dev_tx_queue_setup,
+ .tx_queue_release = i40e_dev_tx_queue_release,
+ .rx_queue_count = i40e_dev_rx_queue_count,
+ .rxq_info_get = i40e_rxq_info_get,
+ .txq_info_get = i40e_txq_info_get,
+ .mac_addr_add = i40evf_add_mac_addr,
+ .mac_addr_remove = i40evf_del_mac_addr,
+ .reta_update = i40evf_dev_rss_reta_update,
+ .reta_query = i40evf_dev_rss_reta_query,
+ .rss_hash_update = i40evf_dev_rss_hash_update,
+ .rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
+};
+
+/*
+ * Read data in admin queue to get msg from pf driver
+ */
+static enum i40evf_aq_result
+i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_arq_event_info event;
+ enum i40e_virtchnl_ops opcode;
+ enum i40e_status_code retval;
+ int ret;
+ enum i40evf_aq_result result = I40EVF_MSG_NON;
+
+ event.buf_len = data->buf_len;
+ event.msg_buf = data->msg;
+ ret = i40e_clean_arq_element(hw, &event, NULL);
+ /* Can't read any msg from adminQ */
+ if (ret) {
+ if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ result = I40EVF_MSG_ERR;
+ return result;
+ }
+
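+	/* cookie_high carries the virtchnl opcode, cookie_low the status code */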
+ opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
+ /* pf sys event */
+ if (opcode == I40E_VIRTCHNL_OP_EVENT) {
+ struct i40e_virtchnl_pf_event *vpe =
+ (struct i40e_virtchnl_pf_event *)event.msg_buf;
+
+ result = I40EVF_MSG_SYS;
+ switch (vpe->event) {
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ vf->link_up =
+ vpe->event_data.link_event.link_status;
+ vf->link_speed =
+ vpe->event_data.link_event.link_speed;
+ vf->pend_msg |= PFMSG_LINK_CHANGE;
+ PMD_DRV_LOG(INFO, "Link status update:%s",
+ vf->link_up ? "up" : "down");
+ break;
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ vf->vf_reset = true;
+ vf->pend_msg |= PFMSG_RESET_IMPENDING;
+			PMD_DRV_LOG(INFO, "VF is resetting");
+ break;
+ case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ vf->dev_closed = true;
+ vf->pend_msg |= PFMSG_DRIVER_CLOSE;
+ PMD_DRV_LOG(INFO, "PF driver closed");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
+ __func__, vpe->event);
+ }
+ } else {
+ /* async reply msg on command issued by vf previously */
+ result = I40EVF_MSG_CMD;
+ /* Actual data length read from PF */
+ data->msg_len = event.msg_len;
+ }
+
+ data->result = retval;
+ data->ops = opcode;
+
+ return result;
+}
+
+/**
+ * Clear the current command. Call only after _atomic_set_cmd()
+ * has succeeded.
+ */
+static inline void
+_clear_cmd(struct i40e_vf *vf)
+{
+ rte_wmb();
+ vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/*
+ * Check whether a command is pending in execution. If none, set the new command.
+ */
+static inline int
+_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
+{
+ int ret = rte_atomic32_cmpset(&vf->pend_cmd,
+ I40E_VIRTCHNL_OP_UNKNOWN, ops);
+
+ if (!ret)
+ PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+ return !ret;
+}
+
+#define MAX_TRY_TIMES 200
+#define ASQ_DELAY_MS 10
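+/* Total polling budget per command: MAX_TRY_TIMES * ASQ_DELAY_MS = 2000 ms */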
+
+static int
+i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40evf_arq_msg_info info;
+ enum i40evf_aq_result ret;
+ int err, i = 0;
+
+ if (_atomic_set_cmd(vf, args->ops))
+ return -1;
+
+ info.msg = args->out_buffer;
+ info.buf_len = args->out_size;
+ info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
+ info.result = I40E_SUCCESS;
+
+ err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
+ args->in_args, args->in_args_size, NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
+ _clear_cmd(vf);
+ return err;
+ }
+
+ switch (args->ops) {
+ case I40E_VIRTCHNL_OP_RESET_VF:
+		/* no need to process in this function */
+ err = 0;
+ break;
+ case I40E_VIRTCHNL_OP_VERSION:
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ /* for init adminq commands, need to poll the response */
+ err = -1;
+ do {
+ ret = i40evf_read_pfmsg(dev, &info);
+ if (ret == I40EVF_MSG_CMD) {
+ err = 0;
+ break;
+ } else if (ret == I40EVF_MSG_ERR)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+			/* keep polling if nothing or only a sys event was read */
+ } while (i++ < MAX_TRY_TIMES);
+ _clear_cmd(vf);
+ break;
+
+ default:
+		/* for other adminq commands at runtime, wait for the cmd-done flag */
+ err = -1;
+ do {
+ if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) {
+ err = 0;
+ break;
+ }
+ rte_delay_ms(ASQ_DELAY_MS);
+			/* keep waiting until the pending command completes */
+ } while (i++ < MAX_TRY_TIMES);
+ break;
+ }
+
+ return err | vf->cmd_retval;
+}
+
+/*
+ * Check API version, waiting synchronously until the version is read or the admin queue fails
+ */
+static int
+i40evf_check_api_version(struct rte_eth_dev *dev)
+{
+ struct i40e_virtchnl_version_info version, *pver;
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ version.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ version.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+ args.ops = I40E_VIRTCHNL_OP_VERSION;
+ args.in_args = (uint8_t *)&version;
+ args.in_args_size = sizeof(version);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
+ return err;
+ }
+
+ pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
+ vf->version_major = pver->major;
+ vf->version_minor = pver->minor;
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
+ else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
+ PMD_DRV_LOG(INFO, "Peer is Linux PF host");
+ else {
+ PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
+ vf->version_major, vf->version_minor,
+ I40E_VIRTCHNL_VERSION_MAJOR,
+ I40E_VIRTCHNL_VERSION_MINOR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_get_vf_resource(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ uint32_t caps, len;
+
+ args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ if (PF_IS_V11(vf)) {
+ caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+ I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ args.in_args = (uint8_t *)&caps;
+ args.in_args_size = sizeof(caps);
+ } else {
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ }
+ err = i40evf_execute_vf_cmd(dev, &args);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
+ return err;
+ }
+
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+
+ (void)rte_memcpy(vf->vf_res, args.out_buffer,
+ RTE_MIN(args.out_size, len));
+ i40e_vf_parse_hw_config(hw, vf->vf_res);
+
+ return 0;
+}
+
+static int
+i40evf_config_promisc(struct rte_eth_dev *dev,
+ bool enable_unicast,
+ bool enable_multicast)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_virtchnl_promisc_info promisc;
+
+ promisc.flags = 0;
+ promisc.vsi_id = vf->vsi_res->vsi_id;
+
+ if (enable_unicast)
+ promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
+
+ if (enable_multicast)
+ promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+
+ args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.in_args = (uint8_t *)&promisc;
+ args.in_args_size = sizeof(promisc);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "CONFIG_PROMISCUOUS_MODE");
+ return err;
+}
+
+/* Configure VLAN and double VLAN offload; a flag selects which part to configure */
+static int
+i40evf_config_vlan_offload(struct rte_eth_dev *dev,
+ bool enable_vlan_strip)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_virtchnl_vlan_offload_info offload;
+
+ offload.vsi_id = vf->vsi_res->vsi_id;
+ offload.enable_vlan_strip = enable_vlan_strip;
+
+ args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
+ args.in_args = (uint8_t *)&offload;
+ args.in_args_size = sizeof(offload);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
+
+ return err;
+}
+
+static int
+i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
+ struct i40e_vsi_vlan_pvid_info *info)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_virtchnl_pvid_info tpid_info;
+
+ if (info == NULL) {
+ PMD_DRV_LOG(ERR, "invalid parameters");
+ return I40E_ERR_PARAM;
+ }
+
+ memset(&tpid_info, 0, sizeof(tpid_info));
+ tpid_info.vsi_id = vf->vsi_res->vsi_id;
+ (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
+
+ args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
+ args.in_args = (uint8_t *)&tpid_info;
+ args.in_args_size = sizeof(tpid_info);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
+
+ return err;
+}
+
+static void
+i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
+ uint16_t vsi_id,
+ uint16_t queue_id,
+ uint16_t nb_txq,
+ struct i40e_tx_queue *txq)
+{
+ txq_info->vsi_id = vsi_id;
+ txq_info->queue_id = queue_id;
+ if (queue_id < nb_txq) {
+ txq_info->ring_len = txq->nb_tx_desc;
+ txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
+ }
+}
+
+static void
+i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
+ uint16_t vsi_id,
+ uint16_t queue_id,
+ uint16_t nb_rxq,
+ uint32_t max_pkt_size,
+ struct i40e_rx_queue *rxq)
+{
+ rxq_info->vsi_id = vsi_id;
+ rxq_info->queue_id = queue_id;
+ rxq_info->max_pkt_size = max_pkt_size;
+ if (queue_id < nb_rxq) {
+ rxq_info->ring_len = rxq->nb_rx_desc;
+ rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info->databuffer_size =
+ (rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ }
+}
+
+/* Configure VSI queues to work with a Linux PF host */
+static int
+i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
+ struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct vf_cmd_info args;
+ uint16_t i, nb_qp = vf->num_queue_pairs;
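+ /* The request is variable-length: a header followed by one
+ * queue-pair info entry per queue pair, built in a stack buffer. */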
+ const uint32_t size =
+ I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
+ uint8_t buff[size];
+ int ret;
+
+ memset(buff, 0, sizeof(buff));
+ vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
+ vc_vqci->vsi_id = vf->vsi_res->vsi_id;
+ vc_vqci->num_queue_pairs = nb_qp;
+
+ for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
+ i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
+ vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
+ vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
+ vf->max_pkt_len, rxq[i]);
+ }
+ memset(&args, 0, sizeof(args));
+ args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.in_args = (uint8_t *)vc_vqci;
+ args.in_args_size = size;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+
+ return ret;
+}
+
+/* Configure VSI queues to work with a DPDK PF host */
+static int
+i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
+ struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ struct vf_cmd_info args;
+ uint16_t i, nb_qp = vf->num_queue_pairs;
+ const uint32_t size =
+ I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
+ uint8_t buff[size];
+ int ret;
+
+ memset(buff, 0, sizeof(buff));
+ vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
+ vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
+ vc_vqcei->num_queue_pairs = nb_qp;
+ vc_qpei = vc_vqcei->qpair;
+ for (i = 0; i < nb_qp; i++, vc_qpei++) {
+ i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
+ vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
+ vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
+ vf->max_pkt_len, rxq[i]);
+ if (i < dev->data->nb_rx_queues)
+ /*
+ * Add extra info for configuring VSI queues; this is
+ * needed to enable configurable CRC stripping in the VF.
+ */
+ vc_qpei->rxq_ext.crcstrip =
+ dev->data->dev_conf.rxmode.hw_strip_crc;
+ }
+ memset(&args, 0, sizeof(args));
+ args.ops =
+ (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
+ args.in_args = (uint8_t *)vc_vqcei;
+ args.in_args_size = size;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
+
+ return ret;
+}
+
+static int
+i40evf_configure_queues(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ /* To support DPDK PF host */
+ return i40evf_configure_vsi_queues_ext(dev);
+ else
+ /* To support Linux PF host */
+ return i40evf_configure_vsi_queues(dev);
+}
+
+static int
+i40evf_config_irq_map(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct vf_cmd_info args;
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
+ sizeof(struct i40e_virtchnl_vector_map)];
+ struct i40e_virtchnl_irq_map_info *map_info;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t vector_id;
+ int i, err;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
+ else
+ vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
+ } else {
+ vector_id = I40E_MISC_VEC_ID;
+ }
+
+ map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
+ map_info->num_vectors = 1;
+ map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+ map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
+ /* Always use the default dynamic MSI-X interrupt */
+ map_info->vecmap[0].vector_id = vector_id;
+ /* Don't map any tx queue */
+ map_info->vecmap[0].txq_map = 0;
+ map_info->vecmap[0].rxq_map = 0;
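+ /* Map every RX queue onto the single vector chosen above. */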
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ map_info->vecmap[0].rxq_map |= 1 << i;
+ if (rte_intr_dp_is_en(intr_handle))
+ intr_handle->intr_vec[i] = vector_id;
+ }
+
+ args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.in_args = (u8 *)cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
+
+ return err;
+}
+
+static int
+i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
+ bool on)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_virtchnl_queue_select queue_select;
+ int err;
+ struct vf_cmd_info args;
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ if (isrx)
+ queue_select.rx_queues |= 1 << qid;
+ else
+ queue_select.tx_queues |= 1 << qid;
+
+ if (on)
+ args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ else
+ args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
+ isrx ? "RX" : "TX", qid, on ? "on" : "off");
+
+ return err;
+}
+
+static int
+i40evf_start_queues(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ int i;
+ struct i40e_rx_queue *rxq;
+ struct i40e_tx_queue *txq;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev_data->rx_queues[i];
+ if (rxq->rx_deferred_start)
+ continue;
+ if (i40evf_dev_rx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev_data->tx_queues[i];
+ if (txq->tx_deferred_start)
+ continue;
+ if (i40evf_dev_tx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40evf_stop_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ /* Stop TX queues first */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
+ return -1;
+ }
+ }
+
+ /* Then stop RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+i40evf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct i40e_virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
+ sizeof(struct i40e_virtchnl_ether_addr)];
+ int err;
+ struct vf_cmd_info args;
+
+ if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ return;
+ }
+
+ list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "OP_ADD_ETHER_ADDRESS");
+
+ return;
+}
+
+static void
+i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct i40e_virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *addr;
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
+ sizeof(struct i40e_virtchnl_ether_addr)];
+ int err;
+ struct vf_cmd_info args;
+
+ addr = &(data->mac_addrs[index]);
+
+ if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ return;
+ }
+
+ list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "OP_DEL_ETHER_ADDRESS");
+ return;
+}
+
+static int
+i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_virtchnl_queue_select q_stats;
+ int err;
+ struct vf_cmd_info args;
+
+ memset(&q_stats, 0, sizeof(q_stats));
+ q_stats.vsi_id = vf->vsi_res->vsi_id;
+ args.ops = I40E_VIRTCHNL_OP_GET_STATS;
+ args.in_args = (u8 *)&q_stats;
+ args.in_args_size = sizeof(q_stats);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
+ *pstats = NULL;
+ return err;
+ }
+ *pstats = (struct i40e_eth_stats *)args.out_buffer;
+ return 0;
+}
+
+static int
+i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ int ret;
+ struct i40e_eth_stats *pstats = NULL;
+
+ ret = i40evf_update_stats(dev, &pstats);
+ if (ret != 0)
+ return ret;
+
+ stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
+ pstats->rx_broadcast;
+ stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
+ pstats->tx_unicast;
+ stats->ierrors = pstats->rx_discards;
+ stats->oerrors = pstats->tx_errors + pstats->tx_discards;
+ stats->ibytes = pstats->rx_bytes;
+ stats->obytes = pstats->tx_bytes;
+
+ return 0;
+}
+
+static void
+i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_eth_stats *pstats = NULL;
+
+ /* read stat values to clear hardware registers */
+ i40evf_update_stats(dev, &pstats);
+
+ /* set stats offset base on current values */
+ vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
+}
+
+static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned i;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < I40EVF_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_i40evf_stats_strings[i].name);
+ }
+ return I40EVF_NB_XSTATS;
+}
+
+static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n)
+{
+ int ret;
+ unsigned i;
+ struct i40e_eth_stats *pstats = NULL;
+
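+ /* Per ethdev convention, when the caller's array is too small,
+ * return the number of entries required instead of filling it. */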
+ if (n < I40EVF_NB_XSTATS)
+ return I40EVF_NB_XSTATS;
+
+ ret = i40evf_update_stats(dev, &pstats);
+ if (ret != 0)
+ return 0;
+
+ if (!xstats)
+ return 0;
+
+ /* loop over xstats array and values from pstats */
+ for (i = 0; i < I40EVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value = *(uint64_t *)(((char *)pstats) +
+ rte_i40evf_stats_strings[i].offset);
+ }
+
+ return I40EVF_NB_XSTATS;
+}
+
+static int
+i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ int err;
+ struct vf_cmd_info args;
+
+ vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
+ args.in_args = (u8 *)&cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
+
+ return err;
+}
+
+static int
+i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ int err;
+ struct vf_cmd_info args;
+
+ vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
+ args.in_args = (u8 *)&cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
+
+ return err;
+}
+
+static int
+i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ struct rte_eth_link *new_link;
+
+ args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
+ return err;
+ }
+
+ new_link = (struct rte_eth_link *)args.out_buffer;
+ (void)rte_memcpy(link, new_link, sizeof(*link));
+
+ return 0;
+}
+
+static const struct rte_pci_id pci_id_i40evf_map[] = {
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static inline int
+i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
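+ /* rte_eth_link fits in 64 bits here, so a compare-and-set against
+ * the current value updates the link status atomically. */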
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/* Disable IRQ0 */
+static inline void
+i40evf_disable_irq0(struct i40e_hw *hw)
+{
+ /* Disable all interrupt types */
+ I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0);
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+/* Enable IRQ0 */
+static inline void
+i40evf_enable_irq0(struct i40e_hw *hw)
+{
+ /* Enable admin queue interrupt trigger */
+ uint32_t val;
+
+ i40evf_disable_irq0(hw);
+ val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1);
+ val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK |
+ I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK;
+ I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val);
+
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_reset_vf(struct i40e_hw *hw)
+{
+ int i, reset;
+
+ if (i40e_vf_reset(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Reset VF NIC failed");
+ return -1;
+ }
+ /**
+ * After the VF issues a reset command to the PF, the PF won't
+ * necessarily reset the VF immediately; it depends on the PF's
+ * current state. If the PF is not initialized yet, no VF reset
+ * happens; otherwise the PF tries to reset. Even when the VF is
+ * reset, the PF sets I40E_VFGEN_RSTAT to COMPLETE first, waits
+ * 10ms, then sets it to ACTIVE. The VF may miss the moment
+ * COMPLETE is set, so wait a long time here.
+ */
+ rte_delay_ms(200);
+
+ for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
+ reset = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
+ if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
+ break;
+ else
+ rte_delay_ms(50);
+ }
+
+ if (i >= MAX_RESET_WAIT_CNT) {
+ PMD_INIT_LOG(ERR, "Reset VF NIC failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_init_vf(struct rte_eth_dev *dev)
+{
+ int i, err, bufsz;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct ether_addr *p_mac_addr;
+ uint16_t interval =
+ i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
+
+ vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ vf->dev_data = dev->data;
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
+ goto err;
+ }
+
+ i40e_init_adminq_parameter(hw);
+ err = i40e_init_adminq(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
+ goto err;
+ }
+
+ /* Reset VF and wait until it's complete */
+ if (i40evf_reset_vf(hw)) {
+ PMD_INIT_LOG(ERR, "reset NIC failed");
+ goto err_aq;
+ }
+
+ /* VF reset, shutdown admin queue and initialize again */
+ if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
+ return -1;
+ }
+
+ i40e_init_adminq_parameter(hw);
+ if (i40e_init_adminq(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "init_adminq failed");
+ return -1;
+ }
+ vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
+ if (!vf->aq_resp) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
+ goto err_aq;
+ }
+ if (i40evf_check_api_version(dev) != 0) {
+ PMD_INIT_LOG(ERR, "check_api version failed");
+ goto err_aq;
+ }
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
+ if (!vf->vf_res) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
+ goto err_aq;
+ }
+
+ if (i40evf_get_vf_resource(dev) != 0) {
+ PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
+ goto err_alloc;
+ }
+
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < vf->vf_res->num_vsis; i++) {
+ if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ vf->vsi_res = &vf->vf_res->vsi_res[i];
+ }
+
+ if (!vf->vsi_res) {
+ PMD_INIT_LOG(ERR, "no LAN VSI found");
+ goto err_alloc;
+ }
+
+ if (hw->mac.type == I40E_MAC_X722_VF)
+ vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
+ vf->vsi.vsi_id = vf->vsi_res->vsi_id;
+ vf->vsi.type = vf->vsi_res->vsi_type;
+ vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+ vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Store the MAC address configured by the host, or generate a random one */
+ p_mac_addr = (struct ether_addr *)(vf->vsi_res->default_mac_addr);
+ if (is_valid_assigned_ether_addr(p_mac_addr)) /* Configured by host */
+ ether_addr_copy(p_mac_addr, (struct ether_addr *)hw->mac.addr);
+ else
+ eth_random_addr(hw->mac.addr); /* Generate a random one */
+
+ /* If the PF host is not DPDK, set the interval of ITR0 to max */
+ if (vf->version_major != I40E_DPDK_VERSION_MAJOR) {
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ (I40E_ITR_INDEX_DEFAULT <<
+ I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
+ I40EVF_WRITE_FLUSH(hw);
+ }
+
+ return 0;
+
+err_alloc:
+ rte_free(vf->vf_res);
+err_aq:
+ i40e_shutdown_adminq(hw); /* ignore error */
+err:
+ return -1;
+}
+
+static int
+i40evf_uninit_vf(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->adapter_stopped == 0)
+ i40evf_dev_close(dev);
+ rte_free(vf->vf_res);
+ vf->vf_res = NULL;
+ rte_free(vf->aq_resp);
+ vf->aq_resp = NULL;
+
+ return 0;
+}
+
+static void
+i40evf_handle_pf_event(struct rte_eth_dev *dev,
+ uint8_t *msg,
+ __rte_unused uint16_t msglen)
+{
+ struct i40e_virtchnl_pf_event *pf_msg =
+ (struct i40e_virtchnl_pf_event *)msg;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ switch (pf_msg->event) {
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+ break;
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
+ vf->link_up = pf_msg->event_data.link_event.link_status;
+ vf->link_speed = pf_msg->event_data.link_event.link_speed;
+ break;
+ case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown event received %u", pf_msg->event);
+ break;
+ }
+}
+
+static void
+i40evf_handle_aq_msg(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_arq_event_info info;
+ struct i40e_virtchnl_msg *v_msg;
+ uint16_t pending, opcode;
+ int ret;
+
+ info.buf_len = I40E_AQ_BUF_SZ;
+ if (!vf->aq_resp) {
+ PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
+ return;
+ }
+ info.msg_buf = vf->aq_resp;
+ v_msg = (struct i40e_virtchnl_msg *)&info.desc;
+
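+ /* Drain the admin receive queue; i40e_clean_arq_element() reports
+ * via 'pending' whether more events remain. */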
+ pending = 1;
+ while (pending) {
+ ret = i40e_clean_arq_element(hw, &info, &pending);
+
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
+ "ret: %d", ret);
+ break;
+ }
+ opcode = rte_le_to_cpu_16(info.desc.opcode);
+
+ switch (opcode) {
+ case i40e_aqc_opc_send_msg_to_vf:
+ if (v_msg->v_opcode == I40E_VIRTCHNL_OP_EVENT)
+ /* process event */
+ i40evf_handle_pf_event(dev, info.msg_buf,
+ info.msg_len);
+ else {
+ /* read the message and check it is the expected one */
+ if (v_msg->v_opcode == vf->pend_cmd) {
+ vf->cmd_retval = v_msg->v_retval;
+ /* prevent compiler reordering */
+ rte_compiler_barrier();
+ _clear_cmd(vf);
+ } else
+ PMD_DRV_LOG(ERR, "command mismatch, "
+ "expect %u, get %u",
+ vf->pend_cmd, v_msg->v_opcode);
+ PMD_DRV_LOG(DEBUG, "adminq response is received,"
+ " opcode = %d\n", v_msg->v_opcode);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ opcode);
+ break;
+ }
+ }
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt. Only adminq interrupt is processed in VF.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ i40evf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
+
+ /* No interrupt event indicated */
+ if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
+ PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n");
+ goto done;
+ }
+
+ if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
+ PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n");
+ i40evf_handle_aq_msg(dev);
+ }
+
+ /* Link Status Change interrupt */
+ if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
+ PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
+ " do nothing\n");
+
+done:
+ i40evf_enable_irq0(hw);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+}
+
+static int
+i40evf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
+ eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* assign ops func pointer */
+ eth_dev->dev_ops = &i40evf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &i40e_recv_pkts;
+ eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ i40e_set_rx_function(eth_dev);
+ i40e_set_tx_function(eth_dev);
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
+
+ hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
+ hw->device_id = eth_dev->pci_dev->id.device_id;
+ hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
+ hw->bus.device = eth_dev->pci_dev->addr.devid;
+ hw->bus.func = eth_dev->pci_dev->addr.function;
+ hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ hw->adapter_stopped = 0;
+
+ if (i40evf_init_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "Init vf failed");
+ return -1;
+ }
+
+ /* register callback func to eal lib */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ i40evf_dev_interrupt_handler, (void *)eth_dev);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ /* configure and enable device interrupt */
+ i40evf_enable_irq0(hw);
+
+ /* copy mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
+ ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
+ 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
+ " store MAC addresses",
+ ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
+ return -ENOMEM;
+ }
+ ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ return 0;
+}
+
+static int
+i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ if (i40evf_uninit_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
+ return -1;
+ }
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+/*
+ * virtual function driver struct
+ */
+static struct eth_driver rte_i40evf_pmd = {
+ .pci_drv = {
+ .name = "rte_i40evf_pmd",
+ .id_table = pci_id_i40evf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ },
+ .eth_dev_init = i40evf_dev_init,
+ .eth_dev_uninit = i40evf_dev_uninit,
+ .dev_private_size = sizeof(struct i40e_adapter),
+};
+
+/*
+ * VF Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Registers itself as the virtual poll mode driver for PCI Fortville VF devices.
+ */
+static int
+rte_i40evf_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_i40evf_pmd);
+
+ return 0;
+}
+
+static struct rte_driver rte_i40evf_driver = {
+ .type = PMD_PDEV,
+ .init = rte_i40evf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_i40evf_driver, i40evf);
+DRIVER_REGISTER_PCI_TABLE(i40evf, pci_id_i40evf_map);
+
+static int
+i40evf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct i40e_vf *vf;
+
+ /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions we will reset it.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
+ /* For non-DPDK PF drivers, the VF cannot disable HW CRC stripping;
+ * it is implicitly enabled by the PF.
+ */
+ if (!conf->rxmode.hw_strip_crc) {
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) {
+ /* Peer is running non-DPDK PF driver. */
+ PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
+ return -EINVAL;
+ }
+ }
+
+ return i40evf_init_vlan(dev);
+}
+
+static int
+i40evf_init_vlan(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ int ret;
+
+ /* Apply vlan offload setting */
+ i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+
+ /* Apply pvid setting */
+ ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
+ data->dev_conf.txmode.hw_vlan_insert_pvid);
+ return ret;
+}
+
+static void
+i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ bool enable_vlan_strip = 0;
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ /* Linux pf host doesn't support vlan offload yet */
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.hw_vlan_strip)
+ enable_vlan_strip = 1;
+ else
+ enable_vlan_strip = 0;
+
+ i40evf_config_vlan_offload(dev, enable_vlan_strip);
+ }
+ }
+}
+
+static int
+i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct i40e_vsi_vlan_pvid_info info;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ memset(&info, 0, sizeof(info));
+ info.on = on;
+
+ /* Linux PF host doesn't support vlan offload yet */
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
+ if (info.on)
+ info.config.pvid = pvid;
+ else {
+ info.config.reject.tagged =
+ dev_conf->txmode.hw_vlan_reject_tagged;
+ info.config.reject.untagged =
+ dev_conf->txmode.hw_vlan_reject_untagged;
+ }
+ return i40evf_config_vlan_pvid(dev, &info);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err = 0;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ else
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return err;
+}
+
+static int
+i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return err;
+}
+
+static int
+i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_tx_queue *txq;
+ int err;
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ int ret;
+
+ if (on)
+ ret = i40evf_add_vlan(dev, vlan_id);
+ else
+ ret = i40evf_del_vlan(dev, vlan_id);
+
+ return ret;
+}
+
+static int
+i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t buf_size, len;
+
+ rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
+
+ /* Calculate the maximum packet length allowed */
+ mbp_priv = rte_mempool_get_priv(rxq->mp);
+ buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+ rxq->hs_mode = i40e_header_split_none;
+ rxq->rx_hdr_len = 0;
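+ /* The RX queue context takes the buffer length in units of
+ * (1 << I40E_RXQ_CTX_DBUFF_SHIFT) bytes, i.e. 128-byte steps. */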
+ rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
+ rxq->max_pkt_len = RTE_MIN(len,
+ dev_data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /**
+ * Check if the jumbo frame and maximum packet length are set correctly
+ */
+ if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, as jumbo "
+ "frame is enabled", (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
+ }
+ } else {
+ if (rxq->max_pkt_len < ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, as jumbo "
+ "frame is disabled", (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (dev_data->dev_conf.rxmode.enable_scatter ||
+ (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_rx_init(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t i;
+ int ret = I40E_SUCCESS;
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+
+ i40evf_config_rss(vf);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!rxq[i] || !rxq[i]->q_set)
+ continue;
+ ret = i40evf_rxq_init(dev, rxq[i]);
+ if (ret != I40E_SUCCESS)
+ break;
+ }
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(dev);
+
+ return ret;
+}
+
+static void
+i40evf_tx_init(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+
+ i40e_set_tx_function(dev);
+}
+
+static inline void
+i40evf_enable_queues_intr(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ I40EVF_WRITE_FLUSH(hw);
+ return;
+ }
+
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ /* To support DPDK PF host */
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ /* If the host runs the kernel PF driver, do nothing:
+ * interrupt 0 is used for rx packets, but I40E_VFINT_DYN_CTL01
+ * is not set here because i40evf_enable_irq0 already did it.
+ */
+
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static inline void
+i40evf_disable_queues_intr(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ I40EVF_WRITE_FLUSH(hw);
+ return;
+ }
+
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
+ - 1),
+ 0);
+ /* If the host runs the kernel PF driver, do nothing:
+ * interrupt 0 is used for rx packets, but I40E_VFINT_DYN_CTL01
+ * is not zeroed because interrupt 0 also serves adminq processing.
+ */
+
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t interval =
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+ else
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(msix_intr -
+ I40E_RX_VEC_START),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
+
+ I40EVF_WRITE_FLUSH(hw);
+
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+ else
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(msix_intr -
+ I40E_RX_VEC_START),
+ 0);
+
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static void
+i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
+{
+ struct i40e_virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err, i, j;
+ int next_begin = 0;
+ int begin = 0;
+ uint32_t len;
+ struct ether_addr *addr;
+ struct vf_cmd_info args;
+
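+ /* The full MAC list may not fit in one admin queue buffer
+ * (I40E_AQ_BUF_SZ), so send it to the PF in chunks. */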
+ do {
+ j = 0;
+ len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
+ if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
+ continue;
+ len += sizeof(struct i40e_virtchnl_ether_addr);
+ if (len >= I40E_AQ_BUF_SZ) {
+ next_begin = i + 1;
+ break;
+ }
+ }
+
+ list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return;
+ }
+
+ for (i = begin; i < next_begin; i++) {
+ addr = &dev->data->mac_addrs[i];
+ if (is_zero_ether_addr(addr))
+ continue;
+ (void)rte_memcpy(list->list[j].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+ PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ j++;
+ }
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = j;
+ args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS :
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.in_args = (uint8_t *)list;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETHER_ADDRESS" :
+ "OP_DEL_ETHER_ADDRESS");
+ rte_free(list);
+ begin = next_begin;
+ } while (begin < I40E_NUM_MACADDR_MAX);
+}
+
+static int
+i40evf_dev_start(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->adapter_stopped = 0;
+
+ vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
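+ /* Queues are reported to the PF as RX/TX pairs, so size for the
+ * larger of the two queue counts. */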
+ vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ if (i40evf_rx_init(dev) != 0) {
+ PMD_DRV_LOG(ERR, "failed to do RX init");
+ return -1;
+ }
+
+ i40evf_tx_init(dev);
+
+ if (i40evf_configure_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "configure queues failed");
+ goto err_queue;
+ }
+ if (i40evf_config_irq_map(dev)) {
+ PMD_DRV_LOG(ERR, "config_irq_map failed");
+ goto err_queue;
+ }
+
+ /* Set all mac addrs */
+ i40evf_add_del_all_mac_addr(dev, TRUE);
+
+ if (i40evf_start_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "enable queues failed");
+ goto err_mac;
+ }
+
+ i40evf_enable_queues_intr(dev);
+ return 0;
+
+err_mac:
+ i40evf_add_del_all_mac_addr(dev, FALSE);
+err_queue:
+ return -1;
+}
+
+static void
+i40evf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ i40evf_stop_queues(dev);
+ i40evf_disable_queues_intr(dev);
+ i40e_dev_clear_queues(dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ /* remove all mac addrs */
+ i40evf_add_del_all_mac_addr(dev, FALSE);
+}
+
+static int
+i40evf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link new_link;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ /*
+ * The DPDK PF host provides an interface to acquire link status,
+ * while the Linux PF driver does not.
+ */
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ i40evf_get_link_status(dev, &new_link);
+ else {
+ /* Linux driver PF host */
+ switch (vf->link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ new_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ new_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ new_link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ new_link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ default:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ }
+ /* full duplex only */
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? ETH_LINK_UP :
+ ETH_LINK_DOWN;
+ }
+ i40evf_dev_atomic_write_link_status(dev, &new_link);
+
+ return 0;
+}
+
+static void
+i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If enabled, just return */
+ if (vf->promisc_unicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
+ if (ret == 0)
+ vf->promisc_unicast_enabled = TRUE;
+}
+
+static void
+i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If disabled, just return */
+ if (!vf->promisc_unicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
+ if (ret == 0)
+ vf->promisc_unicast_enabled = FALSE;
+}
+
+static void
+i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If enabled, just return */
+ if (vf->promisc_multicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
+ if (ret == 0)
+ vf->promisc_multicast_enabled = TRUE;
+}
+
+static void
+i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If already disabled, just return */
+ if (!vf->promisc_multicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
+ if (ret == 0)
+ vf->promisc_multicast_enabled = FALSE;
+}
+
+static void
+i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+ dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+}
+
+static void
+i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ if (i40evf_get_statics(dev, stats))
+ PMD_DRV_LOG(ERR, "Get statistics failed");
+}
+
+static void
+i40evf_dev_close(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = dev->pci_dev;
+
+ i40evf_dev_stop(dev);
+ hw->adapter_stopped = 1;
+ i40e_dev_free_queues(dev);
+ i40evf_reset_vf(hw);
+ i40e_shutdown_adminq(hw);
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&pci_dev->intr_handle);
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ i40evf_dev_interrupt_handler, (void *)dev);
+ i40evf_disable_irq0(hw);
+}
+
+static int
+i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!lut)
+ return -EINVAL;
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
+ }
+
+ return 0;
+}
+
+static int
+i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_vf *vf;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi || !lut)
+ return -EINVAL;
+
+ vf = I40E_VSI_TO_VF(vsi);
+ hw = I40E_VSI_TO_HW(vsi);
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
+ I40EVF_WRITE_FLUSH(hw);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t *lut;
+ uint16_t i, idx, shift;
+ int ret;
+
+ if (reta_size != ETH_RSS_RETA_SIZE_64) {
+ PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
+ "table (%d) doesn't match what the hardware can "
+ "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
+ }
+ ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t i, idx, shift;
+ uint8_t *lut;
+ int ret;
+
+ if (reta_size != ETH_RSS_RETA_SIZE_64) {
+ PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
+ "table (%d) doesn't match what the hardware can "
+ "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+
+ ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = lut[i];
+ }
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret = 0;
+
+ if (!key || key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
+ return -EINVAL;
+ }
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ struct i40e_aqc_get_set_rss_key_data *key_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key "
+ "via AQ");
+ } else {
+ uint32_t *hash_key = (uint32_t *)key;
+ uint16_t i;
+
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]);
+ I40EVF_WRITE_FLUSH(hw);
+ }
+
+ return ret;
+}
+
+static int
+i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!key || !key_len)
+ return -EINVAL;
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
+ (struct i40e_aqc_get_set_rss_key_data *)key);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
+ return ret;
+ }
+ } else {
+ uint32_t *key_dw = (uint32_t *)key;
+ uint16_t i;
+
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i));
+ }
+ *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+ return 0;
+}
+
+static int
+i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+ uint64_t rss_hf, hena;
+ int ret;
+
+ ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ if (ret)
+ return ret;
+
+ rss_hf = rss_conf->rss_hf;
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
+ hena &= ~I40E_RSS_HENA_ALL;
+ hena |= i40e_config_hena(rss_hf);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static void
+i40evf_disable_rss(struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
+ hena &= ~I40E_RSS_HENA_ALL;
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_config_rss(struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+ uint16_t num;
+
+ if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ i40evf_disable_rss(vf);
+ PMD_DRV_LOG(DEBUG, "RSS not configured\n");
+ return 0;
+ }
+
+ num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
+ /* Fill out the lookup table */
+ for (i = 0, j = 0; i < nb_q; i++, j++) {
+ if (j >= num)
+ j = 0;
+ lut = (lut << 8) | j;
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+
+ rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
+ i40evf_disable_rss(vf);
+ PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
+ return 0;
+ }
+
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Generate a random default hash key */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ rss_key_default[i] = (uint32_t)rte_rand();
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40evf_hw_rss_hash_set(vf, &rss_conf);
+}
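+
+/*
+ * Illustrative sketch, compiled out: the default LUT fill above shifts one
+ * queue index per byte into 'lut' and flushes a register on every 4th
+ * entry. With num = 3 RX queues the indices cycle 0,1,2,0,1,2,..., so the
+ * first register written is HLUT(0) = 0x00010200. The guard macro is
+ * hypothetical and never defined.
+ */
+#ifdef I40EVF_RSS_LUT_FILL_EXAMPLE
+static void
+i40evf_lut_fill_example(void)
+{
+ uint32_t i, j, lut = 0;
+ uint16_t num = 3;
+
+ for (i = 0, j = 0; i < 4; i++, j++) {
+  if (j >= num)
+   j = 0;
+  lut = (lut << 8) | j;
+ }
+ /* lut == 0x00010200 at this point */
+}
+#endif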
+
+static int
+i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
+ if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -EINVAL;
+ return 0;
+ }
+
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -EINVAL;
+
+ return i40evf_hw_rss_hash_set(vf, rss_conf);
+}
+
+static int
+i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t hena;
+
+ i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
+ &rss_conf->rss_key_len);
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
+ rss_conf->rss_hf = i40e_parse_hena(hena);
+
+ return 0;
+}
diff --git a/src/dpdk/drivers/net/i40e/i40e_fdir.c b/src/dpdk/drivers/net/i40e/i40e_fdir.c
new file mode 100644
index 00000000..f65c4110
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_fdir.c
@@ -0,0 +1,1451 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_arp.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN 16
+#endif
+
+#define I40E_FDIR_PKT_LEN 512
+#define I40E_FDIR_IP_DEFAULT_LEN 420
+#define I40E_FDIR_IP_DEFAULT_TTL 0x40
+#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
+#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
+#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
+#define I40E_FDIR_IPv6_TC_OFFSET 20
+
+#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
+#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
+#define I40E_FDIR_UDP_DEFAULT_LEN 400
+
+/* Wait count and interval for fdir filter programming */
+#define I40E_FDIR_WAIT_COUNT 10
+#define I40E_FDIR_WAIT_INTERVAL_US 1000
+
+/* Wait count and interval for fdir filter flush */
+#define I40E_FDIR_FLUSH_RETRY 50
+#define I40E_FDIR_FLUSH_INTERVAL_MS 5
+
+#define I40E_COUNTER_PF 2
+/* Statistics counter index for one PF */
+#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
+#define I40E_MAX_FLX_SOURCE_OFF 480
+#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
+
+#define NONUSE_FLX_PIT_DEST_OFF 63
+#define NONUSE_FLX_PIT_FSIZE 1
+#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
+ (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
+ (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
+ ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
+ NONUSE_FLX_PIT_DEST_OFF : \
+ ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
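+
+/*
+ * Illustrative sketch, compiled out: MK_FLX_PIT() packs one flexible-payload
+ * extraction rule into a PRTQF_FLX_PIT register value. This hypothetical
+ * helper (guard never defined) builds a rule that extracts 8 words from the
+ * start of the payload into destination word 0 of the field vector.
+ */
+#ifdef I40E_FLX_PIT_EXAMPLE
+static uint32_t
+i40e_flx_pit_example(void)
+{
+ /* src_offset = 0, fsize = 8, dst_offset = 0 (all in words) */
+ return MK_FLX_PIT(0, 8, 0);
+}
+#endif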
+
+#define I40E_FDIR_FLOWS ( \
+ (1 << RTE_ETH_FLOW_FRAG_IPV4) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1 << RTE_ETH_FLOW_FRAG_IPV6) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+ (1 << RTE_ETH_FLOW_L2_PAYLOAD))
+
+#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
+
+static int i40e_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+static int i40e_fdir_flush(struct rte_eth_dev *dev);
+
+static int
+i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct i40e_hmc_obj_rxq rx_ctx;
+ int err = I40E_SUCCESS;
+
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ /* Init the RX queue in hardware */
+ rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = 0;
+ rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->nb_rx_desc;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+ rx_ctx.dtype = i40e_header_split_none;
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.rxmax = ETHER_MAX_LEN;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 0;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 0;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
+ return err;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
+ return err;
+ }
+ rxq->qrx_tail = hw->hw_addr +
+ I40E_QRX_TAIL(rxq->vsi->base_queue);
+
+ rte_wmb();
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ return err;
+}
+
+/*
+ * i40e_fdir_setup - reserve and initialize the Flow Director resources
+ * @pf: board private structure
+ */
+int
+i40e_fdir_setup(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ int err = I40E_SUCCESS;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+
+ if ((pf->flags & I40E_FLAG_FDIR) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
+ " num_filters_best_effort = %u.",
+ hw->func_caps.fd_filters_guaranteed,
+ hw->func_caps.fd_filters_best_effort);
+
+ vsi = pf->fdir.fdir_vsi;
+ if (vsi) {
+ PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
+ return I40E_SUCCESS;
+ }
+ /* make new FDIR VSI */
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
+ return I40E_ERR_NO_AVAILABLE_VSI;
+ }
+ pf->fdir.fdir_vsi = vsi;
+
+ /* FDIR TX queue setup */
+ err = i40e_fdir_setup_tx_resources(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
+ goto fail_setup_tx;
+ }
+
+ /* FDIR RX queue setup */
+ err = i40e_fdir_setup_rx_resources(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
+ goto fail_setup_rx;
+ }
+
+ err = i40e_tx_queue_init(pf->fdir.txq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
+ goto fail_mem;
+ }
+
+ /* need to switch the queue on before dev start */
+ err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
+ goto fail_mem;
+ }
+
+ /* Init the rx queue in hardware */
+ err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
+ goto fail_mem;
+ }
+
+ /* switch on rx queue */
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
+ goto fail_mem;
+ }
+
+ /* reserve memory for the fdir programming packet */
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d",
+ eth_dev->driver->pci_drv.name,
+ I40E_FDIR_MZ_NAME,
+ eth_dev->data->port_id);
+ mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
+ if (!mz) {
+ PMD_DRV_LOG(ERR, "Cannot init memzone for "
+ "flow director program packet.");
+ err = I40E_ERR_NO_MEMORY;
+ goto fail_mem;
+ }
+ pf->fdir.prg_pkt = mz->addr;
+ pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+
+ pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
+ PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
+ vsi->base_queue);
+ return I40E_SUCCESS;
+
+fail_mem:
+ i40e_dev_rx_queue_release(pf->fdir.rxq);
+ pf->fdir.rxq = NULL;
+fail_setup_rx:
+ i40e_dev_tx_queue_release(pf->fdir.txq);
+ pf->fdir.txq = NULL;
+fail_setup_tx:
+ i40e_vsi_release(vsi);
+ pf->fdir.fdir_vsi = NULL;
+ return err;
+}
+
+/*
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ */
+void
+i40e_fdir_teardown(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+
+ vsi = pf->fdir.fdir_vsi;
+ if (!vsi)
+ return;
+ i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
+ i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ i40e_dev_rx_queue_release(pf->fdir.rxq);
+ pf->fdir.rxq = NULL;
+ i40e_dev_tx_queue_release(pf->fdir.txq);
+ pf->fdir.txq = NULL;
+ i40e_vsi_release(vsi);
+ pf->fdir.fdir_vsi = NULL;
+}
+
+/* check whether the flow director table is empty */
+static inline int
+i40e_fdir_empty(struct i40e_hw *hw)
+{
+ uint32_t guarant_cnt, best_cnt;
+
+ guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ if (best_cnt + guarant_cnt > 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Initialize the configuration of the byte stream extracted as flexible
+ * payload, and the mask settings
+ */
+static inline void
+i40e_init_flx_pld(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint8_t pctype;
+ int i, index;
+
+ /*
+ * Define the byte stream extracted as flexible payload in the
+ * field vector. By default, select 8 words from the beginning
+ * of the payload as flexible payload.
+ */
+ for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
+ index = i * I40E_MAX_FLXPLD_FIED;
+ pf->fdir.flex_set[index].src_offset = 0;
+ pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
+ pf->fdir.flex_set[index].dst_offset = 0;
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
+ }
+
+ /* initialize the masks */
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
+ if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype))
+ continue;
+ pf->fdir.flex_mask[pctype].word_mask = 0;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
+ for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
+ pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
+ pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
+ }
+ }
+}
+
+#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
+
+#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
+ if ((flex_pit2).src_offset < \
+ (flex_pit1).src_offset + (flex_pit1).size) { \
+ PMD_DRV_LOG(ERR, "src_offset should be not" \
+ " less than than previous offset" \
+ " + previous FSIZE."); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+/*
+ * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit
+ * structures, sorted by their src_offset values
+ */
+static inline uint16_t
+i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
+ struct i40e_fdir_flex_pit *flex_pit)
+{
+ uint16_t src_tmp, size, num = 0;
+ uint16_t i, k, j = 0;
+
+ while (j < I40E_FDIR_MAX_FLEX_LEN) {
+ size = 1;
+ for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
+ if (src_offset[j + 1] == src_offset[j] + 1)
+ size++;
+ else
+ break;
+ }
+ src_tmp = src_offset[j] + 1 - size;
+ /* the flex_pit entries need to be sorted by src_offset */
+ for (i = 0; i < num; i++) {
+ if (src_tmp < flex_pit[i].src_offset)
+ break;
+ }
+ /* if an insertion is required, shift later entries back */
+ for (k = num; k > i; k--)
+ flex_pit[k] = flex_pit[k - 1];
+ /* insert */
+ flex_pit[i].dst_offset = j + 1 - size;
+ flex_pit[i].src_offset = src_tmp;
+ flex_pit[i].size = size;
+ j++;
+ num++;
+ }
+ return num;
+}
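+
+/*
+ * Illustrative sketch, compiled out (assumes I40E_FDIR_MAX_FLEX_LEN is 16):
+ * three runs of consecutive source offsets collapse into three sorted
+ * flex_pit entries, {src 0, size 4, dst 0}, {src 8, size 4, dst 4} and
+ * {src 16, size 8, dst 8}. The guard macro is hypothetical and never
+ * defined.
+ */
+#ifdef I40E_SRCOFF_EXAMPLE
+static void
+i40e_srcoff_example(void)
+{
+ uint16_t src_offset[I40E_FDIR_MAX_FLEX_LEN] = {
+  0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 20, 21, 22, 23};
+ struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
+ uint16_t num;
+
+ memset(flex_pit, 0, sizeof(flex_pit));
+ num = i40e_srcoff_to_flx_pit(src_offset, flex_pit);
+ /* num == 3 at this point */
+}
+#endif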
+
+/* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
+static inline int
+i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
+{
+ struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
+ uint16_t num, i;
+
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
+ if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
+ PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
+ return -EINVAL;
+ }
+ }
+
+ memset(flex_pit, 0, sizeof(flex_pit));
+ num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
+ if (num > I40E_MAX_FLXPLD_FIED) {
+ PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
+ return -EINVAL;
+ }
+ for (i = 0; i < num; i++) {
+ if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
+ flex_pit[i].src_offset & 0x01) {
+ PMD_DRV_LOG(ERR, "flexpayload should be measured"
+ " in word");
+ return -EINVAL;
+ }
+ if (i != num - 1)
+ I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
+ }
+ return 0;
+}
+
+/*
+ * i40e_check_fdir_flex_conf - check whether the flex payload and mask configuration
+ * arguments are valid
+ */
+static int
+i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
+{
+ const struct rte_eth_flex_payload_cfg *flex_cfg;
+ const struct rte_eth_fdir_flex_mask *flex_mask;
+ uint16_t mask_tmp;
+ uint8_t nb_bitmask;
+ uint16_t i, j;
+ int ret = 0;
+
+ if (conf == NULL) {
+ PMD_DRV_LOG(INFO, "NULL pointer.");
+ return -EINVAL;
+ }
+ /* check flexible payload setting configuration */
+ if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "invalid number of payload setting.");
+ return -EINVAL;
+ }
+ for (i = 0; i < conf->nb_payloads; i++) {
+ flex_cfg = &conf->flex_set[i];
+ if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "invalid payload type.");
+ return -EINVAL;
+ }
+ ret = i40e_check_fdir_flex_payload(flex_cfg);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
+ return -EINVAL;
+ }
+ }
+
+ /* check flex mask setting configuration */
+ if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
+ PMD_DRV_LOG(ERR, "invalid number of flex masks.");
+ return -EINVAL;
+ }
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ flex_mask = &conf->flex_mask[i];
+ if (!I40E_VALID_FLOW(flex_mask->flow_type)) {
+ PMD_DRV_LOG(WARNING, "invalid flow type.");
+ return -EINVAL;
+ }
+ nb_bitmask = 0;
+ for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(flex_mask->mask[j],
+ flex_mask->mask[j + 1]);
+ if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
+ nb_bitmask++;
+ if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
+ PMD_DRV_LOG(ERR, " exceed maximal"
+ " number of bitmasks.");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as
+ * flexible payload
+ * @pf: board private structure
+ * @cfg: the rule describing how the byte stream is extracted
+ */
+static void
+i40e_set_flx_pld_cfg(struct i40e_pf *pf,
+ const struct rte_eth_flex_payload_cfg *cfg)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
+ uint32_t flx_pit;
+ uint16_t num, min_next_off; /* in words */
+ uint8_t field_idx = 0;
+ uint8_t layer_idx = 0;
+ uint16_t i;
+
+ if (cfg->type == RTE_ETH_L2_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L2_IDX;
+ else if (cfg->type == RTE_ETH_L3_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L3_IDX;
+ else if (cfg->type == RTE_ETH_L4_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ memset(flex_pit, 0, sizeof(flex_pit));
+ num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
+
+ for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ /* record the info in fdir structure */
+ pf->fdir.flex_set[field_idx].src_offset =
+ flex_pit[i].src_offset / sizeof(uint16_t);
+ pf->fdir.flex_set[field_idx].size =
+ flex_pit[i].size / sizeof(uint16_t);
+ pf->fdir.flex_set[field_idx].dst_offset =
+ flex_pit[i].dst_offset / sizeof(uint16_t);
+ flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+ pf->fdir.flex_set[field_idx].size,
+ pf->fdir.flex_set[field_idx].dst_offset);
+
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ }
+ min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+ pf->fdir.flex_set[field_idx].size;
+
+ for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+ /* set the unused registers, obeying the register constraints */
+ flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+ NONUSE_FLX_PIT_DEST_OFF);
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
+ flx_pit);
+ min_next_off++;
+ }
+}
+
+/*
+ * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
+ * @pf: board private structure
+ * @pctype: packet classify type
+ * @flex_masks: mask for flexible payload
+ */
+static void
+i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_flex_mask *mask_cfg)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_mask *flex_mask;
+ uint32_t flxinset, fd_mask;
+ uint16_t mask_tmp;
+ uint8_t i, nb_bitmask = 0;
+
+ flex_mask = &pf->fdir.flex_mask[pctype];
+ memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
+ if (mask_tmp != 0x0) {
+ flex_mask->word_mask |=
+ I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+ if (mask_tmp != UINT16_MAX) {
+ /* set bit mask */
+ flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
+ flex_mask->bitmask[nb_bitmask].offset =
+ i / sizeof(uint16_t);
+ nb_bitmask++;
+ }
+ }
+ }
+ /* write mask to hw */
+ flxinset = (flex_mask->word_mask <<
+ I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+ I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+ for (i = 0; i < nb_bitmask; i++) {
+ fd_mask = (flex_mask->bitmask[i].mask <<
+ I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+ I40E_PRTQF_FD_MSK_MASK_MASK;
+ fd_mask |= ((flex_mask->bitmask[i].offset +
+ I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+ I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+ I40E_PRTQF_FD_MSK_OFFSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+ }
+}
+
+/*
+ * Configure flow director related setting
+ */
+int
+i40e_fdir_configure(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_fdir_flex_conf *conf;
+ enum i40e_filter_pctype pctype;
+ uint32_t val;
+ uint8_t i;
+ int ret = 0;
+
+ /*
+ * Configuration needs to be done before
+ * flow director filters are added.
+ * If filters exist, flush them.
+ */
+ if (i40e_fdir_empty(hw) < 0) {
+ ret = i40e_fdir_flush(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to flush fdir table.");
+ return ret;
+ }
+ }
+
+ /* enable FDIR filter */
+ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
+
+ i40e_init_flx_pld(pf); /* set flex config to default value */
+
+ conf = &dev->data->dev_conf.fdir_conf.flex_conf;
+ ret = i40e_check_fdir_flex_conf(conf);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, " invalid configuration arguments.");
+ return -EINVAL;
+ }
+ /* configure flex payload */
+ for (i = 0; i < conf->nb_payloads; i++)
+ i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
+ /* configure flex mask */
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
+ i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
+ }
+
+ return ret;
+}
+
+static inline int
+i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
+ unsigned char *raw_pkt,
+ bool vlan)
+{
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ uint16_t *ether_type;
+ uint8_t len = 2 * sizeof(struct ether_addr);
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
+ [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
+ };
+
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (vlan && fdir_input->flow_ext.vlan_tci) {
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &fdir_input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ switch (fdir_input->flow_type) {
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ *ether_type = fdir_input->flow.l2_flow.ether_type;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV4:
+ ip = (struct ipv4_hdr *)raw_pkt;
+
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+ /* set total_length to the default value */
+ ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+ ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+ fdir_input->flow.ip4_flow.proto :
+ next_proto[fdir_input->flow_type];
+ ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+ fdir_input->flow.ip4_flow.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
+ ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+ ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+ len += sizeof(struct ipv4_hdr);
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV6:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+ (fdir_input->flow.ipv6_flow.tc <<
+ I40E_FDIR_IPv6_TC_OFFSET));
+ ip6->payload_len =
+ rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+ fdir_input->flow.ipv6_flow.proto :
+ next_proto[fdir_input->flow_type];
+ ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+ fdir_input->flow.ipv6_flow.hop_limits :
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ rte_memcpy(&(ip6->src_addr),
+ &(fdir_input->flow.ipv6_flow.dst_ip),
+ IPV6_ADDR_LEN);
+ rte_memcpy(&(ip6->dst_addr),
+ &(fdir_input->flow.ipv6_flow.src_ip),
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown flow type %u.",
+ fdir_input->flow_type);
+ return -1;
+ }
+ return len;
+}
+
+/*
+ * i40e_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_fdir_construct_pkt(struct i40e_pf *pf,
+ const struct rte_eth_fdir_input *fdir_input,
+ unsigned char *raw_pkt)
+{
+ unsigned char *payload, *ptr;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ struct sctp_hdr *sctp;
+ uint8_t size, dst = 0;
+ uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+ int len;
+
+ /* fill the ethernet and IP head */
+ len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+ !!fdir_input->flow_ext.vlan_tci);
+ if (len < 0)
+ return -EINVAL;
+
+ /* fill the L4 head */
+ switch (fdir_input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ sctp = (struct sctp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+ sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV4:
+ payload = raw_pkt + len;
+ set_idx = I40E_FLXPLD_L3_IDX;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ sctp = (struct sctp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+ sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV6:
+ payload = raw_pkt + len;
+ set_idx = I40E_FLXPLD_L3_IDX;
+ break;
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ payload = raw_pkt + len;
+ /*
+ * ARP packet is a special case on which the payload
+ * starts after the whole ARP header
+ */
+ if (fdir_input->flow.l2_flow.ether_type ==
+ rte_cpu_to_be_16(ETHER_TYPE_ARP))
+ payload += sizeof(struct arp_hdr);
+ set_idx = I40E_FLXPLD_L2_IDX;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
+ return -EINVAL;
+ }
+
+ /* fill the flexbytes to payload */
+ for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+ pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+ size = pf->fdir.flex_set[pit_idx].size;
+ if (size == 0)
+ continue;
+ dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+ ptr = payload +
+ pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+ (void)rte_memcpy(ptr,
+ &fdir_input->flow_ext.flexbytes[dst],
+ size * sizeof(uint16_t));
+ }
+
+ return 0;
+}
+
+/* Construct the tx flags */
+static inline uint64_t
+i40e_build_ctob(uint32_t td_cmd,
+ uint32_t td_offset,
+ unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+/*
+ * check the programming status descriptor in the rx queue;
+ * done after a Flow Director programming descriptor has been
+ * submitted on the tx queue
+ */
+static inline int
+i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ uint64_t qword1;
+ uint32_t rx_status;
+ uint32_t len, id;
+ uint32_t error;
+ int ret = 0;
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
+ id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+ if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
+ id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
+ error = (qword1 &
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+ if (error == (0x1 <<
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
+ " (FD_ID %u): programming status"
+ " reported.",
+ rxdp->wb.qword0.hi_dword.fd_id);
+ ret = -1;
+ } else if (error == (0x1 <<
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
+ " (FD_ID %u): programming status"
+ " reported.",
+ rxdp->wb.qword0.hi_dword.fd_id);
+ ret = -1;
+ } else
+ PMD_DRV_LOG(ERR, "invalid programming status"
+ " reported, error = %u.", error);
+ } else
+ PMD_DRV_LOG(ERR, "unknown programming status"
+ " reported, len = %d, id = %u.", len, id);
+ rxdp->wb.qword1.status_error_len = 0;
+ rxq->rx_tail++;
+ if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
+ rxq->rx_tail = 0;
+ }
+ return ret;
+}
+
+/*
+ * i40e_add_del_fdir_filter - add or remove a flow director filter.
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ enum i40e_filter_pctype pctype;
+ int ret = 0;
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
+ " check the mode in fdir_conf.");
+ return -ENOTSUP;
+ }
+
+ if (!I40E_VALID_FLOW(filter->input.flow_type)) {
+ PMD_DRV_LOG(ERR, "invalid flow_type input.");
+ return -EINVAL;
+ }
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID");
+ return -EINVAL;
+ }
+
+ memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+ ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+ return ret;
+ }
+ pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+ ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+ pctype);
+ return ret;
+ }
+ return ret;
+}
+
+/*
+ * i40e_fdir_filter_programming - Program a flow director filter rule.
+ * This is done with a Flow Director programming descriptor, followed by
+ * the packet that contains the filter fields to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_filter *filter,
+ bool add)
+{
+ struct i40e_tx_queue *txq = pf->fdir.txq;
+ struct i40e_rx_queue *rxq = pf->fdir.rxq;
+ const struct rte_eth_fdir_action *fdir_action = &filter->action;
+ volatile struct i40e_tx_desc *txdp;
+ volatile struct i40e_filter_program_desc *fdirdp;
+ uint32_t td_cmd;
+ uint16_t vsi_id, i;
+ uint8_t dest;
+
+ PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+ fdirdp = (volatile struct i40e_filter_program_desc *)
+ (&(txq->tx_ring[txq->tx_tail]));
+
+ fdirdp->qindex_flex_ptype_vsi =
+ rte_cpu_to_le_32((fdir_action->rx_queue <<
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((fdir_action->flex_off <<
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((pctype <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+ if (filter->input.flow_ext.is_vf)
+ vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+ else
+ /* Use LAN VSI Id by default */
+ vsi_id = pf->main_vsi->vsi_id;
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32(((uint32_t)vsi_id <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+ fdirdp->dtype_cmd_cntindex =
+ rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+ if (add)
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ else
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+ else {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
+ " unsupported fdir behavior.");
+ return -EINVAL;
+ }
+
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32((fdir_action->report_status<<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(
+ ((uint32_t)pf->fdir.match_counter_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+ fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+ PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+ txdp = &(txq->tx_ring[txq->tx_tail + 1]);
+ txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+ td_cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_DUMMY;
+
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+ txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+ /* Update the tx tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
+ rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
+ if ((txdp->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ break;
+ }
+ if (i >= I40E_FDIR_WAIT_COUNT) {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
+ " time out to get DD on tx queue.");
+ return -ETIMEDOUT;
+ }
+ /* delay up to 10 ms in total before checking the programming status */
+ rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
+ if (i40e_check_fdir_programming_status(rxq) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
+ " programming status reported.");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+/*
+ * i40e_fdir_flush - clear all filters of Flow Director table
+ * @pf: board private structure
+ */
+static int
+i40e_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t reg;
+ uint16_t guarant_cnt, best_cnt;
+ uint16_t i;
+
+ I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+ I40E_WRITE_FLUSH(hw);
+
+ for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
+ rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
+ reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
+ if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+ break;
+ }
+ if (i >= I40E_FDIR_FLUSH_RETRY) {
+ PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
+ return -ETIMEDOUT;
+ }
+ guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ if (guarant_cnt != 0 || best_cnt != 0) {
+ PMD_DRV_LOG(ERR, "Failed to flush FD table.");
+ return -ENOSYS;
+ } else
+ PMD_DRV_LOG(INFO, "FD table Flush success.");
+ return 0;
+}
+
+static inline void
+i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
+ struct rte_eth_flex_payload_cfg *flex_set,
+ uint16_t *num)
+{
+ struct i40e_fdir_flex_pit *flex_pit;
+ struct rte_eth_flex_payload_cfg *ptr = flex_set;
+ uint16_t src, dst, size, j, k;
+ uint8_t i, layer_idx;
+
+ for (layer_idx = I40E_FLXPLD_L2_IDX;
+ layer_idx <= I40E_FLXPLD_L4_IDX;
+ layer_idx++) {
+ if (layer_idx == I40E_FLXPLD_L2_IDX)
+ ptr->type = RTE_ETH_L2_PAYLOAD;
+ else if (layer_idx == I40E_FLXPLD_L3_IDX)
+ ptr->type = RTE_ETH_L3_PAYLOAD;
+ else if (layer_idx == I40E_FLXPLD_L4_IDX)
+ ptr->type = RTE_ETH_L4_PAYLOAD;
+
+ for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+ flex_pit = &pf->fdir.flex_set[layer_idx *
+ I40E_MAX_FLXPLD_FIED + i];
+ if (flex_pit->size == 0)
+ continue;
+ src = flex_pit->src_offset * sizeof(uint16_t);
+ dst = flex_pit->dst_offset * sizeof(uint16_t);
+ size = flex_pit->size * sizeof(uint16_t);
+ for (j = src, k = dst; j < src + size; j++, k++)
+ ptr->src_offset[k] = j;
+ }
+ (*num)++;
+ ptr++;
+ }
+}
+
+static inline void
+i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
+ struct rte_eth_fdir_flex_mask *flex_mask,
+ uint16_t *num)
+{
+ struct i40e_fdir_flex_mask *mask;
+ struct rte_eth_fdir_flex_mask *ptr = flex_mask;
+ uint16_t flow_type;
+ uint8_t i, j;
+ uint16_t off_bytes, mask_tmp;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
+ i++) {
+ mask = &pf->fdir.flex_mask[i];
+ if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
+ continue;
+ flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i);
+ for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
+ if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
+ ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
+ ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
+ } else {
+ ptr->mask[j * sizeof(uint16_t)] = 0x0;
+ ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
+ }
+ }
+ for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
+ off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
+ mask_tmp = ~mask->bitmask[j].mask;
+ ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
+ ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
+ }
+ ptr->flow_type = flow_type;
+ ptr++;
+ (*num)++;
+ }
+}
+
+/*
+ * i40e_fdir_info_get - get information of Flow Director
+ * @dev: ethernet device to get info from
+ * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
+ * the flow director information.
+ */
+static void
+i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t num_flex_set = 0;
+ uint16_t num_flex_mask = 0;
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+ fdir->mode = RTE_FDIR_MODE_PERFECT;
+ else
+ fdir->mode = RTE_FDIR_MODE_NONE;
+
+ fdir->guarant_spc =
+ (uint32_t)hw->func_caps.fd_filters_guaranteed;
+ fdir->best_spc =
+ (uint32_t)hw->func_caps.fd_filters_best_effort;
+ fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
+ fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
+ fdir->flex_payload_unit = sizeof(uint16_t);
+ fdir->flex_bitmask_unit = sizeof(uint16_t);
+ fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
+ fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
+ fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
+
+ i40e_fdir_info_get_flex_set(pf,
+ fdir->flex_conf.flex_set,
+ &num_flex_set);
+ i40e_fdir_info_get_flex_mask(pf,
+ fdir->flex_conf.flex_mask,
+ &num_flex_mask);
+
+ fdir->flex_conf.nb_payloads = num_flex_set;
+ fdir->flex_conf.nb_flexmasks = num_flex_mask;
+}
+
+/*
+ * i40e_fdir_stats_get - get statistics of Flow Director
+ * @dev: ethernet device to get stats from
+ * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
+ * the flow director statistics.
+ */
+static void
+i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ stat->guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ stat->best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+}
+
+static int
+i40e_fdir_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_filter_info *info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if (!info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
+ ret = i40e_fdir_filter_inset_select(pf,
+ &(info->info.input_set_conf));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
+ info->info_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * i40e_fdir_ctrl_func - handle all operations on the flow director.
+ * @dev: ethernet device
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+int
+i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if ((pf->flags & I40E_FLAG_FDIR) == 0)
+ return -ENOTSUP;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ ret = i40e_fdir_flush(dev);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
+ break;
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_fdir_filter_set(dev,
+ (struct rte_eth_fdir_filter_info *)arg);
+ break;
+ case RTE_ETH_FILTER_STATS:
+ i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
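+
+/*
+ * Illustrative sketch, compiled out: how an application reaches
+ * i40e_fdir_ctrl_func() through the generic filter API, steering IPv4/UDP
+ * flows from 1.2.3.4:1024 to RX queue 1. All field values are illustrative
+ * and the guard macro is hypothetical and never defined.
+ */
+#ifdef I40E_FDIR_USAGE_EXAMPLE
+static int
+i40e_fdir_usage_example(uint8_t port_id)
+{
+ struct rte_eth_fdir_filter filter;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.soft_id = 1;
+ filter.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ filter.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(1, 2, 3, 4));
+ filter.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1024);
+ filter.action.rx_queue = 1;
+ filter.action.behavior = RTE_ETH_FDIR_ACCEPT;
+ filter.action.report_status = RTE_ETH_FDIR_REPORT_ID;
+
+ return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+  RTE_ETH_FILTER_ADD, &filter);
+}
+#endif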
diff --git a/src/dpdk/drivers/net/i40e/i40e_logs.h b/src/dpdk/drivers/net/i40e/i40e_logs.h
new file mode 100644
index 00000000..e042e242
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_logs.h
@@ -0,0 +1,77 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_LOGS_H_
+#define _I40E_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
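+
+/*
+ * Usage sketch: PMD_DRV_LOG() appends the trailing newline itself, so call
+ * sites pass a bare format string, e.g.
+ *
+ *  PMD_DRV_LOG(ERR, "Failed to setup queue %u", queue_id);
+ *
+ * which expands to RTE_LOG(ERR, PMD, "%s(): Failed to setup queue %u\n", ...)
+ * when RTE_LIBRTE_I40E_DEBUG_DRIVER is defined, and compiles to a no-op
+ * otherwise.
+ */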
+
+#endif /* _I40E_LOGS_H_ */
diff --git a/src/dpdk/drivers/net/i40e/i40e_pf.c b/src/dpdk/drivers/net/i40e/i40e_pf.c
new file mode 100644
index 00000000..d5b2d450
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_pf.c
@@ -0,0 +1,1104 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_string_fns.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_pf.h"
+
+#define I40E_CFG_CRCSTRIP_DEFAULT 1
+
+static int
+i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_queue_select *qsel,
+ bool on);
+
+/**
+ * Bind PF queues with VSI and VF.
+ **/
+static int
+i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
+{
+ int i;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t vsi_id = vf->vsi->vsi_id;
+ uint16_t vf_id = vf->vf_idx;
+ uint16_t nb_qps = vf->vsi->nb_qps;
+ uint16_t qbase = vf->vsi->base_queue;
+ uint16_t q1, q2;
+ uint32_t val;
+
+ /*
+ * The VF uses scattered-range queues, so there is no need
+ * to set QBASE in this register.
+ */
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* Enable the VPLAN_QTABLE[] mapping registers */
+ I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
+ I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
+
+ /* map PF queues to VF */
+ for (i = 0; i < nb_qps; i++) {
+ val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
+ }
+
+ /* map PF queues to VSI */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
+ if (2 * i > nb_qps - 1)
+ q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
+ else
+ q1 = qbase + 2 * i;
+
+ if (2 * i + 1 > nb_qps - 1)
+ q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
+ else
+ q2 = qbase + 2 * i + 1;
+
+ val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
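+
+/*
+ * Illustrative sketch, compiled out: each VSILAN_QTABLE register above packs
+ * two queue indices, q1 in the low half and q2 at QINDEX_1_SHIFT. With
+ * qbase = 64 and nb_qps = 4, the first two registers map queue pairs
+ * (64, 65) and (66, 67); the remaining pairs are written with the "unused"
+ * QINDEX_0 mask. The guard macro is hypothetical and never defined.
+ */
+#ifdef I40E_VF_QMAP_EXAMPLE
+static uint32_t
+i40e_vf_qmap_example(void)
+{
+ uint16_t q1 = 64, q2 = 65;
+
+ return (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
+}
+#endif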
+
+
+/**
+ * Perform a VF reset operation.
+ */
+int
+i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
+{
+ uint32_t val, i;
+ struct i40e_hw *hw;
+ struct i40e_pf *pf;
+ uint16_t vf_id, abs_vf_id, vf_msix_num;
+ int ret;
+ struct i40e_virtchnl_queue_select qsel;
+
+ if (vf == NULL)
+ return -EINVAL;
+
+ pf = vf->pf;
+ hw = I40E_PF_TO_HW(vf->pf);
+ vf_id = vf->vf_idx;
+ abs_vf_id = vf_id + hw->func_caps.vf_base_id;
+
+	/* Notify the VF that a VF reset (VFR) is in progress */
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);
+
+	/*
+	 * If a SW VF reset is required, a VFLR interrupt will be generated
+	 * and this function will be called again. To avoid that, disable
+	 * the interrupt first.
+	 */
+ if (do_hw_reset) {
+ vf->state = I40E_VF_INRESET;
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
+ val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
+ I40E_WRITE_FLUSH(hw);
+ }
+
+#define VFRESET_MAX_WAIT_CNT 100
+ /* Wait until VF reset is done */
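+	/* Poll VPGEN_VFRSTAT every 10 us, for a worst case of ~1 ms */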
+ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
+ rte_delay_us(10);
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
+ if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+ break;
+ }
+
+ if (i >= VFRESET_MAX_WAIT_CNT) {
+ PMD_DRV_LOG(ERR, "VF reset timeout");
+ return -ETIMEDOUT;
+ }
+
+	/* Not the first reset: do the cleanup job first */
+ if (vf->vsi) {
+ /* Disable queues */
+ memset(&qsel, 0, sizeof(qsel));
+ for (i = 0; i < vf->vsi->nb_qps; i++)
+ qsel.rx_queues |= 1 << i;
+ qsel.tx_queues = qsel.rx_queues;
+ ret = i40e_pf_host_switch_queues(vf, &qsel, false);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Disable VF queues failed");
+ return -EFAULT;
+ }
+
+ /* Disable VF interrupt setting */
+ vf_msix_num = hw->func_caps.num_msix_vectors_vf;
+ for (i = 0; i < vf_msix_num; i++) {
+ if (!i)
+ val = I40E_VFINT_DYN_CTL0(vf_id);
+ else
+ val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
+ (vf_id)) + (i - 1));
+ I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ /* remove VSI */
+ ret = i40e_vsi_release(vf->vsi);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Release VSI failed");
+ return -EFAULT;
+ }
+ }
+
+#define I40E_VF_PCI_ADDR 0xAA
+#define I40E_VF_PEND_MASK 0x20
+ /* Check the pending transactions of this VF */
+ /* Use absolute VF id, refer to datasheet for details */
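+	/* CIAA/CIAD give indirect access to the VF's PCI config space:
+	 * write the config address to CIAA, then poll CIAD until the
+	 * pending-transaction bit clears.
+	 */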
+ I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
+ (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
+ rte_delay_us(1);
+ val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
+ if ((val & I40E_VF_PEND_MASK) == 0)
+ break;
+ }
+
+ if (i >= VFRESET_MAX_WAIT_CNT) {
+		PMD_DRV_LOG(ERR, "Wait for VF PCI transaction end timed out");
+ return -ETIMEDOUT;
+ }
+
+	/* Reset done: set the COMPLETED flag and clear the reset bit */
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
+ val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
+ vf->reset_cnt++;
+ I40E_WRITE_FLUSH(hw);
+
+	/* Allocate resources again */
+ if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
+ vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
+ NULL, vf->vf_idx);
+ } else {
+ vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
+ vf->pf->main_vsi, vf->vf_idx);
+ }
+
+ if (vf->vsi == NULL) {
+ PMD_DRV_LOG(ERR, "Add vsi failed");
+ return -EFAULT;
+ }
+
+ ret = i40e_pf_vf_queues_mapping(vf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "queue mapping error");
+ i40e_vsi_release(vf->vsi);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static int
+i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
+ uint32_t opcode,
+ uint32_t retval,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
+ int ret;
+
+ ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
+ msg, msglen, NULL);
+ if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send message to VF, err %u",
+ hw->aq.asq_last_status);
+ }
+
+ return ret;
+}
+
+static void
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf)
+{
+ struct i40e_virtchnl_version_info info;
+
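+	/*
+	 * Report the DPDK signature in the major field so the VF can tell
+	 * a DPDK PF host apart from a kernel PF driver (see i40e_pf.h).
+	 */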
+ info.major = I40E_DPDK_VERSION_MAJOR;
+ info.minor = I40E_DPDK_VERSION_MINOR;
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS, (uint8_t *)&info, sizeof(info));
+}
+
+static int
+i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
+{
+ i40e_pf_host_vf_reset(vf, 1);
+
+ /* No feedback will be sent to VF for VFLR */
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
+{
+ struct i40e_virtchnl_vf_resource *vf_res = NULL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint32_t len = 0;
+ int ret = I40E_SUCCESS;
+
+	/* only has 1 VSI by default */
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_DEFAULT_VF_VSI_NUM *
+ sizeof(struct i40e_virtchnl_vsi_resource);
+
+ vf_res = rte_zmalloc("i40e_vf_res", len, 0);
+ if (vf_res == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate mem");
+ ret = I40E_ERR_NO_MEMORY;
+ vf_res = NULL;
+ len = 0;
+ goto send_msg;
+ }
+
+ vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
+ vf_res->num_queue_pairs = vf->vsi->nb_qps;
+ vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
+
+	/* Change the setting below if the PF host can support more VSIs per VF */
+ vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+	/* As the VF is assumed to have a single VSI now, always return 0 */
+ vf_res->vsi_res[0].vsi_id = 0;
+ vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
+ ether_addr_copy(&vf->mac_addr,
+ (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ ret, (uint8_t *)vf_res, len);
+ rte_free(vf_res);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
+ struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_rxq_info *rxq,
+ uint8_t crcstrip)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;
+
+ /* Clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->ring_len;
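+	/* dsize = 1 selects 32-byte RX descriptors; it is left at 0
+	 * (16-byte) when 16-byte descriptors are compiled in.
+	 */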
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+
+ if (rxq->splithdr_enabled) {
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
+ rx_ctx.dtype = i40e_header_split_enabled;
+ } else {
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.dtype = i40e_header_split_none;
+ }
+ rx_ctx.rxmax = rxq->max_pkt_size;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = crcstrip;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
+ if (err != I40E_SUCCESS)
+ return err;
+ err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);
+
+ return err;
+}
+
+static int
+i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
+ struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_txq_info *txq)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hmc_obj_txq tx_ctx;
+ uint32_t qtx_ctl;
+ uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;
+
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+ tx_ctx.new_context = 1;
+ tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.qlen = txq->ring_len;
+ tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
+ err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
+ if (err != I40E_SUCCESS)
+ return err;
+
+ err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
+ if (err != I40E_SUCCESS)
+ return err;
+
+	/* Bind the queue to the VF function; since TX/RX queues appear
+	 * in pairs, only QTX_CTL needs to be set.
+	 */
+ qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
+ ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK) |
+ (((vf->vf_idx + hw->func_caps.vf_base_id) <<
+ I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK);
+ I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct i40e_vsi *vsi = vf->vsi;
+ struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ int i, ret = I40E_SUCCESS;
+
+ if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
+ vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
+ msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
+ vc_vqci->num_queue_pairs)) {
+		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument is invalid");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vc_qpi = vc_vqci->qpair;
+ for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
+ if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
+ vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /*
+ * Apply VF RX queue setting to HMC.
+ * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * then the extra information of
+ * 'struct i40e_virtchnl_queue_pair_extra_info' is needed,
+ * otherwise set the last parameter to NULL.
+ */
+ if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
+ I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /* Apply VF TX queue setting to HMC */
+ if (i40e_pf_host_hmc_config_txq(hw, vf,
+ &vc_qpi[i].txq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct i40e_vsi *vsi = vf->vsi;
+ struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
+ (struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
+ struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ int i, ret = I40E_SUCCESS;
+
+ if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
+ vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
+ msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
+ vc_vqcei->num_queue_pairs)) {
+		PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument is invalid");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vc_qpei = vc_vqcei->qpair;
+ for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
+ if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
+ vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ /*
+ * Apply VF RX queue setting to HMC.
+ * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * then the extra information of
+ * 'struct i40e_virtchnl_queue_pair_ext_info' is needed,
+ * otherwise set the last parameter to NULL.
+ */
+ if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
+ vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /* Apply VF TX queue setting to HMC */
+ if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
+ I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
+ uint8_t *msg, uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_irq_map_info *irqmap =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+
+ if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
+ PMD_DRV_LOG(ERR, "buffer too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+	/* Assume the VF has only 1 vector to bind all queues */
+ if (irqmap->num_vectors != 1) {
+		PMD_DRV_LOG(ERR, "DPDK host only supports 1 vector");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+	/* This MSIX interrupt index is in the VF's vector range */
+ vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+ vf->vsi->nb_msix = irqmap->num_vectors;
+ vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+
+	/* It doesn't matter how the TX/RX queues map to this vector:
+	 * link all VF RX queues together and only do the mapping work.
+	 * The VF can enable/disable the interrupt by itself.
+	 */
+ i40e_vsi_queues_bind_intr(vf->vsi);
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_queue_select *qsel,
+ bool on)
+{
+ int ret = I40E_SUCCESS;
+ int i;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t baseq = vf->vsi->base_queue;
+
+ if (qsel->rx_queues + qsel->tx_queues == 0)
+ return I40E_ERR_PARAM;
+
+	/* Always enable RX first and disable it last */
+	/* Enable RX when enabling */
+ if (on) {
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->rx_queues & (1 << i)) {
+ ret = i40e_switch_rx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+ }
+
+ /* Enable/Disable TX */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->tx_queues & (1 << i)) {
+ ret = i40e_switch_tx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+
+	/* Disable RX last when disabling */
+ if (!on) {
+ /* disable RX */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->rx_queues & (1 << i)) {
+ ret = i40e_switch_rx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_queue_select *q_sel =
+ (struct i40e_virtchnl_queue_select *)msg;
+
+ if (msg == NULL || msglen != sizeof(*q_sel)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ ret = i40e_pf_host_switch_queues(vf, q_sel, true);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_queue_select *q_sel =
+ (struct i40e_virtchnl_queue_select *)msg;
+
+ if (msg == NULL || msglen != sizeof(*q_sel)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ ret = i40e_pf_host_switch_queues(vf, q_sel, false);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+
+static int
+i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_ether_addr_list *addr_list =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct i40e_mac_filter_info filter;
+ int i;
+ struct ether_addr *mac;
+
+	memset(&filter, 0, sizeof(struct i40e_mac_filter_info));
+
+ if (msg == NULL || msglen <= sizeof(*addr_list)) {
+ PMD_DRV_LOG(ERR, "add_ether_address argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ mac = (struct ether_addr *)(addr_list->list[i].addr);
+ (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+		if (!is_valid_assigned_ether_addr(mac) ||
+ i40e_vsi_add_mac(vf->vsi, &filter)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_ether_addr_list *addr_list =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ int i;
+ struct ether_addr *mac;
+
+ if (msg == NULL || msglen <= sizeof(*addr_list)) {
+ PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ mac = (struct ether_addr *)(addr_list->list[i].addr);
+		if (!is_valid_assigned_ether_addr(mac) ||
+ i40e_vsi_delete_mac(vf->vsi, mac)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
+ uint8_t *msg, uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ int i;
+ uint16_t *vid;
+
+ if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
+ PMD_DRV_LOG(ERR, "add_vlan argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vid = vlan_filter_list->vlan_id;
+
+ for (i = 0; i < vlan_filter_list->num_elements; i++) {
+ ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
+		if (ret != I40E_SUCCESS)
+ goto send_msg;
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ int i;
+ uint16_t *vid;
+
+ if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
+ PMD_DRV_LOG(ERR, "delete_vlan argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vid = vlan_filter_list->vlan_id;
+ for (i = 0; i < vlan_filter_list->num_elements; i++) {
+ ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
+		if (ret != I40E_SUCCESS)
+ goto send_msg;
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_config_promisc_mode(
+ struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_promisc_info *promisc =
+ (struct i40e_virtchnl_promisc_info *)msg;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ bool unicast = FALSE, multicast = FALSE;
+
+ if (msg == NULL || msglen != sizeof(*promisc)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ unicast = TRUE;
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
+ vf->vsi->seid, unicast, NULL, true);
+ if (ret != I40E_SUCCESS)
+ goto send_msg;
+
+ if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ multicast = TRUE;
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
+ multicast, NULL);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
+{
+ i40e_update_vsi_stats(vf->vsi);
+
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
+
+ return I40E_SUCCESS;
+}
+
+static void
+i40e_pf_host_process_cmd_get_link_status(struct i40e_pf_vf *vf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vf->pf->main_vsi);
+
+	/* Update the link status first to pick up the latest link change */
+ i40e_dev_link_update(dev, 1);
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_LINK_STAT,
+ I40E_SUCCESS, (uint8_t *)&dev->data->dev_link,
+ sizeof(struct rte_eth_link));
+}
+
+static int
+i40e_pf_host_process_cmd_cfg_vlan_offload(
+ struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_vlan_offload_info *offload =
+ (struct i40e_virtchnl_vlan_offload_info *)msg;
+
+ if (msg == NULL || msglen != sizeof(*offload)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi,
+ !!offload->enable_vlan_strip);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_pvid_info *tpid_info =
+ (struct i40e_virtchnl_pvid_info *)msg;
+
+ if (msg == NULL || msglen != sizeof(*tpid_info)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+void
+i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
+ uint16_t abs_vf_id, uint32_t opcode,
+ __rte_unused uint32_t retval,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf_vf *vf;
+	/* AdminQ passes the absolute VF id; translate it to the internal VF id */
+ uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
+
+ if (vf_id > pf->vf_num - 1 || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "invalid argument");
+ return;
+ }
+
+ vf = &pf->vfs[vf_id];
+ if (!vf->vsi) {
+		PMD_DRV_LOG(ERR, "No VSI associated with the VF was found");
+ i40e_pf_host_send_msg_to_vf(vf, opcode,
+ I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
+ return;
+ }
+
+ switch (opcode) {
+ case I40E_VIRTCHNL_OP_VERSION :
+ PMD_DRV_LOG(INFO, "OP_VERSION received");
+ i40e_pf_host_process_cmd_version(vf);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF :
+ PMD_DRV_LOG(INFO, "OP_RESET_VF received");
+ i40e_pf_host_process_cmd_reset_vf(vf);
+ break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
+ i40e_pf_host_process_cmd_get_vf_resource(vf);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
+ i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
+ i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
+ msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
+ i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
+ i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUES received");
+ i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
+ i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
+ i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
+ i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
+ i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
+ i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ PMD_DRV_LOG(INFO, "OP_GET_STATS received");
+ i40e_pf_host_process_cmd_get_stats(vf);
+ break;
+ case I40E_VIRTCHNL_OP_GET_LINK_STAT:
+ PMD_DRV_LOG(INFO, "OP_GET_LINK_STAT received");
+ i40e_pf_host_process_cmd_get_link_status(vf);
+ break;
+ case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
+ PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
+ i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
+ PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
+ i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
+ break;
+	/* Any command not handled above is unsupported and
+	 * returns an error code.
+	 */
+ default:
+ PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
+ i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
+ NULL, 0);
+ break;
+ }
+}
+
+int
+i40e_pf_host_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ int ret, i;
+ uint32_t val;
+
+ PMD_INIT_FUNC_TRACE();
+
+	/**
+	 * Return if SR-IOV is not enabled, the VF number is not configured,
+	 * or no queues are assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
+ return I40E_SUCCESS;
+
+ /* Allocate memory to store VF structure */
+	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
+	if (pf->vfs == NULL)
+ return -ENOMEM;
+
+ /* Disable irq0 for VFR event */
+ i40e_pf_disable_irq0(hw);
+
+ /* Disable VF link status interrupt */
+ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ I40E_WRITE_FLUSH(hw);
+
+ for (i = 0; i < pf->vf_num; i++) {
+ pf->vfs[i].pf = pf;
+ pf->vfs[i].state = I40E_VF_INACTIVE;
+ pf->vfs[i].vf_idx = i;
+ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
+ if (ret != I40E_SUCCESS)
+ goto fail;
+ eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
+ }
+
+ /* restore irq0 */
+ i40e_pf_enable_irq0(hw);
+
+ return I40E_SUCCESS;
+
+fail:
+ rte_free(pf->vfs);
+ i40e_pf_enable_irq0(hw);
+
+ return ret;
+}
+
+int
+i40e_pf_host_uninit(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t val;
+
+ PMD_INIT_FUNC_TRACE();
+
+	/**
+	 * Return if SR-IOV is not enabled, the VF number is not configured,
+	 * or no queues are assigned.
+	 */
+ if ((!hw->func_caps.sr_iov_1_1) ||
+ (pf->vf_num == 0) ||
+ (pf->vf_nb_qps == 0))
+ return I40E_SUCCESS;
+
+ /* free memory to store VF structure */
+ rte_free(pf->vfs);
+ pf->vfs = NULL;
+
+ /* Disable irq0 for VFR event */
+ i40e_pf_disable_irq0(hw);
+
+ /* Disable VF link status interrupt */
+ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
diff --git a/src/dpdk/drivers/net/i40e/i40e_pf.h b/src/dpdk/drivers/net/i40e/i40e_pf.h
new file mode 100644
index 00000000..9c01829a
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_pf.h
@@ -0,0 +1,128 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_PF_H_
+#define _I40E_PF_H_
+
+/* VERSION info exchanged between the VF and the PF host. When the VF works
+ * with the ND kernel driver, it reads I40E_VIRTCHNL_VERSION_MAJOR/MINOR.
+ * When it works with a DPDK host, it reads the version below. The VF then
+ * knows who it is talking to and uses the proper language to communicate.
+ */
+#define I40E_DPDK_SIGNATURE ('D' << 24 | 'P' << 16 | 'D' << 8 | 'K')
+#define I40E_DPDK_VERSION_MAJOR I40E_DPDK_SIGNATURE
+#define I40E_DPDK_VERSION_MINOR 0
+
+/* Default number of VSIs that a VF can contain */
+#define I40E_DEFAULT_VF_VSI_NUM 1
+
+#define I40E_DPDK_OFFSET 0x100
+
+enum i40e_pf_vfr_state {
+ I40E_PF_VFR_INPROGRESS = 0,
+ I40E_PF_VFR_COMPLETED = 1,
+};
+
+/* DPDK pf driver specific command to VF */
+enum i40e_virtchnl_ops_dpdk {
+ /*
+ * Keep some gap between Linux PF commands and
+ * DPDK PF extended commands.
+ */
+ I40E_VIRTCHNL_OP_GET_LINK_STAT = I40E_VIRTCHNL_OP_VERSION +
+ I40E_DPDK_OFFSET,
+ I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+};
+
+/* A structure to support extended info of a receive queue. */
+struct i40e_virtchnl_rxq_ext_info {
+ uint8_t crcstrip;
+};
+
+/*
+ * A structure to support extended info of queue pairs; an additional field
+ * is added compared to the original 'struct i40e_virtchnl_queue_pair_info'.
+ */
+struct i40e_virtchnl_queue_pair_ext_info {
+	/* vsi_id and queue_id should be identical for both RX and TX queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+ struct i40e_virtchnl_rxq_ext_info rxq_ext;
+};
+
+/*
+ * A structure to support extended info of VSI queue pairs; it uses
+ * 'struct i40e_virtchnl_queue_pair_ext_info' in place of the original
+ * 'struct i40e_virtchnl_queue_pair_info'.
+ */
+struct i40e_virtchnl_vsi_queue_config_ext_info {
+ uint16_t vsi_id;
+ uint16_t num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_ext_info qpair[0];
+};
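+/*
+ * 'qpair' above is a zero-length trailing array; the actual element count
+ * is carried in 'num_queue_pairs' and sized with the macro below.
+ */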
+
+struct i40e_virtchnl_vlan_offload_info {
+ uint16_t vsi_id;
+ uint8_t enable_vlan_strip;
+ uint8_t reserved;
+};
+
+/*
+ * Macro to calculate the memory size for configuring VSI queues
+ * via virtual channel.
+ */
+#define I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n) \
+ (sizeof(*(x)) + sizeof((x)->qpair[0]) * (n))
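+/*
+ * For example, a message carrying n queue pairs occupies the fixed header
+ * of the config struct plus n 'qpair' entries.
+ */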
+
+/*
+ * I40E_VIRTCHNL_OP_CFG_VLAN_PVID
+ * The VF sends this message to enable/disable the PVID. If it is an
+ * enable op, the PVID needs to be specified. The PF returns a status
+ * code in retval.
+ */
+struct i40e_virtchnl_pvid_info {
+ uint16_t vsi_id;
+ struct i40e_vsi_vlan_pvid_info info;
+};
+
+int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset);
+void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
+ uint16_t abs_vf_id, uint32_t opcode,
+ __rte_unused uint32_t retval,
+ uint8_t *msg, uint16_t msglen);
+int i40e_pf_host_init(struct rte_eth_dev *dev);
+int i40e_pf_host_uninit(struct rte_eth_dev *dev);
+
+#endif /* _I40E_PF_H_ */
diff --git a/src/dpdk/drivers/net/i40e/i40e_regs.h b/src/dpdk/drivers/net/i40e/i40e_regs.h
new file mode 100644
index 00000000..472c7a06
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_regs.h
@@ -0,0 +1,997 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
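+/*
+ * Describes a register or register array to dump: entry (i, j) lives at
+ * base_addr + i * stride1 + j * stride2, where count1 and count2 are the
+ * last valid indices of each dimension (0 for a single register).
+ */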
+struct i40e_reg_info {
+ uint32_t base_addr;
+ uint32_t count1;
+ uint32_t stride1;
+ uint32_t count2;
+ uint32_t stride2;
+ const char *name;
+};
+
+static const struct i40e_reg_info i40e_regs_adminq[] = {
+ {I40E_VFQF_HENA(0), 1, 4, 0, 0, "VFQF_HENA"},
+ {I40E_VFQF_HKEY(0), 12, 4, 0, 0, "VFQF_HKEY"},
+ {I40E_VFQF_HREGION(0), 7, 4, 0, 0, "VFQF_HREGION"},
+ {I40E_VPQF_CTL(0), 127, 4, 0, 0, "VPQF_CTL"},
+ {I40E_PFLAN_QALLOC, 0, 0, 0, 0, "PFLAN_QALLOC"},
+ {I40E_PFQF_CTL_0, 0, 0, 0, 0, "PFQF_CTL_0"},
+ {I40E_VSILAN_QTABLE(0, 0), 7, 2048, 383, 4, "VSILAN_QTABLE"},
+ {I40E_VSIQF_TCREGION(0, 0), 3, 2048, 383, 4, "VSIQF_TCREGION"},
+ {I40E_VSILAN_QBASE(0), 383, 4, 0, 0, "VSILAN_QBASE"},
+ {I40E_VSIQF_CTL(0), 383, 4, 0, 0, "VSIQF_CTL"},
+ {I40E_PFQF_HKEY(0), 12, 128, 0, 0, "PFQF_HKEY"},
+ {I40E_PFQF_HREGION(0), 7, 128, 0, 0, "PFQF_HREGION"},
+ {I40E_PFQF_HENA(0), 1, 128, 0, 0, "PFQF_HENA"},
+ {I40E_PFQF_FDALLOC, 0, 0, 0, 0, "PFQF_FDALLOC"},
+ {I40E_PRTQF_FD_INSET(0, 0), 63, 64, 1, 32, "PRTQF_FD_INSET"},
+ {I40E_PRTQF_FD_MSK(0, 0), 63, 64, 1, 32, "PRTQF_FD_MSK"},
+ {I40E_PRTQF_FD_FLXINSET(0), 63, 32, 0, 0, "PRTQF_FD_FLXINSET"},
+ {I40E_PRTQF_CTL_0, 0, 0, 0, 0, "PRTQF_CTL_0"},
+ {I40E_GLQF_FD_MSK(0, 0), 1, 4, 63, 8, "GLQF_FD_MSK"},
+ {I40E_GLQF_HASH_INSET(0, 0), 1, 4, 63, 8, "GLQF_HASH_INSET"},
+ {I40E_GLQF_HASH_MSK(0, 0), 1, 4, 63, 8, "GLQF_HASH_MSK"},
+ {I40E_GLQF_SWAP(0, 0), 1, 4, 63, 8, "GLQF_SWAP"},
+ {I40E_GLFCOE_RCTL, 0, 0, 0, 0, "GLFCOE_RCTL"},
+ {I40E_GLQF_CTL, 0, 0, 0, 0, "GLQF_CTL"},
+ {I40E_GLQF_HSYM(0), 63, 4, 0, 0, "GLQF_HSYM"},
+ {0, 0, 0, 0, 0, NULL}
+};
+
+static const struct i40e_reg_info i40e_regs_others[] = {
+ {I40E_QTX_TAIL1(0), 15, 4, 0, 0, "QTX_TAIL1"},
+ {I40E_VFPE_CQPDB(0), 127, 4, 0, 0, "VFPE_CQPDB"},
+ {I40E_VFPE_CQPTAIL(0), 127, 4, 0, 0, "VFPE_CQPTAIL"},
+ {I40E_VFPE_CCQPSTATUS(0), 127, 4, 0, 0, "VFPE_CCQPSTATUS"},
+ {I40E_VFPE_CCQPLOW(0), 127, 4, 0, 0, "VFPE_CCQPLOW"},
+ {I40E_VFPE_CCQPHIGH(0), 127, 4, 0, 0, "VFPE_CCQPHIGH"},
+ {I40E_VFPE_IPCONFIG0(0), 127, 4, 0, 0, "VFPE_IPCONFIG0"},
+ {I40E_VFPE_CQPERRCODES(0), 127, 4, 0, 0, "VFPE_CQPERRCODES"},
+ {I40E_QRX_TAIL1(0), 15, 4, 0, 0, "QRX_TAIL1"},
+ {I40E_VFINT_ITRN1(0, 0), 2, 64, 15, 4, "VFINT_ITRN1"},
+ {I40E_VFPE_TCPNOWTIMER(0), 127, 4, 0, 0, "VFPE_TCPNOWTIMER"},
+ {I40E_VFPE_MRTEIDXMASK(0), 127, 4, 0, 0, "VFPE_MRTEIDXMASK"},
+ {I40E_VFPE_RCVUNEXPECTEDERROR(0), 127, 4, 0, 0,
+ "VFPE_RCVUNEXPECTEDERROR"},
+ {I40E_VFINT_DYN_CTLN1(0), 15, 4, 0, 0, "VFINT_DYN_CTLN1"},
+ {I40E_VFINT_ICR01, 0, 0, 0, 0, "VFINT_ICR01"},
+ {I40E_VFINT_ITR01(0), 2, 4, 0, 0, "VFINT_ITR01"},
+ {I40E_VFINT_ICR0_ENA1, 0, 0, 0, 0, "VFINT_ICR0_ENA1"},
+ {I40E_VFINT_STAT_CTL01, 0, 0, 0, 0, "VFINT_STAT_CTL01"},
+ {I40E_VFINT_DYN_CTL01, 0, 0, 0, 0, "VFINT_DYN_CTL01"},
+ {I40E_VF_ARQBAH1, 0, 0, 0, 0, "VF_ARQBAH1"},
+ {I40E_VF_ATQH1, 0, 0, 0, 0, "VF_ATQH1"},
+ {I40E_VF_ATQLEN1, 0, 0, 0, 0, "VF_ATQLEN1"},
+ {I40E_VF_ARQBAL1, 0, 0, 0, 0, "VF_ARQBAL1"},
+ {I40E_VF_ARQT1, 0, 0, 0, 0, "VF_ARQT1"},
+ {I40E_VF_ARQH1, 0, 0, 0, 0, "VF_ARQH1"},
+ {I40E_VF_ATQBAH1, 0, 0, 0, 0, "VF_ATQBAH1"},
+ {I40E_VF_ATQBAL1, 0, 0, 0, 0, "VF_ATQBAL1"},
+ {I40E_VF_ARQLEN1, 0, 0, 0, 0, "VF_ARQLEN1"},
+ {I40E_PFPE_CQPDB, 0, 0, 0, 0, "PFPE_CQPDB"},
+ {I40E_PFPE_CQPTAIL, 0, 0, 0, 0, "PFPE_CQPTAIL"},
+ {I40E_PFPE_CCQPSTATUS, 0, 0, 0, 0, "PFPE_CCQPSTATUS"},
+ {I40E_PFPE_CCQPLOW, 0, 0, 0, 0, "PFPE_CCQPLOW"},
+ {I40E_PFPE_CCQPHIGH, 0, 0, 0, 0, "PFPE_CCQPHIGH"},
+ {I40E_PFPE_IPCONFIG0, 0, 0, 0, 0, "PFPE_IPCONFIG0"},
+ {I40E_VF_ATQT1, 0, 0, 0, 0, "VF_ATQT1"},
+ {I40E_PFPE_TCPNOWTIMER, 0, 0, 0, 0, "PFPE_TCPNOWTIMER"},
+ {I40E_PFPE_MRTEIDXMASK, 0, 0, 0, 0, "PFPE_MRTEIDXMASK"},
+ {I40E_PFPE_RCVUNEXPECTEDERROR, 0, 0, 0, 0, "PFPE_RCVUNEXPECTEDERROR"},
+ {I40E_PFPE_UDACTRL, 0, 0, 0, 0, "PFPE_UDACTRL"},
+ {I40E_PFPE_UDAUCFBQPN, 0, 0, 0, 0, "PFPE_UDAUCFBQPN"},
+ {I40E_VFGEN_RSTAT, 0, 0, 0, 0, "VFGEN_RSTAT"},
+ {I40E_PFPE_CQPERRCODES, 0, 0, 0, 0, "PFPE_CQPERRCODES"},
+ {I40E_PFPE_FLMXMITALLOCERR, 0, 0, 0, 0, "PFPE_FLMXMITALLOCERR"},
+ {I40E_PFPE_FLMQ1ALLOCERR, 0, 0, 0, 0, "PFPE_FLMQ1ALLOCERR"},
+ {I40E_VFPE_IPCONFIG01, 0, 0, 0, 0, "VFPE_IPCONFIG01"},
+ {I40E_VFPE_MRTEIDXMASK1, 0, 0, 0, 0, "VFPE_MRTEIDXMASK1"},
+ {I40E_VFPE_RCVUNEXPECTEDERROR1, 0, 0, 0, 0, "VFPE_RCVUNEXPECTEDERROR1"},
+ {I40E_VFPE_CCQPHIGH1, 0, 0, 0, 0, "VFPE_CCQPHIGH1"},
+ {I40E_VFPE_CQPERRCODES1, 0, 0, 0, 0, "VFPE_CQPERRCODES1"},
+ {I40E_VFPE_CQPTAIL1, 0, 0, 0, 0, "VFPE_CQPTAIL1"},
+ {I40E_VFPE_AEQALLOC1, 0, 0, 0, 0, "VFPE_AEQALLOC1"},
+ {I40E_VFPE_TCPNOWTIMER1, 0, 0, 0, 0, "VFPE_TCPNOWTIMER1"},
+ {I40E_VFPE_CCQPLOW1, 0, 0, 0, 0, "VFPE_CCQPLOW1"},
+ {I40E_VFPE_CQACK1, 0, 0, 0, 0, "VFPE_CQACK1"},
+ {I40E_VFPE_CQARM1, 0, 0, 0, 0, "VFPE_CQARM1"},
+ {I40E_VFPE_CCQPSTATUS1, 0, 0, 0, 0, "VFPE_CCQPSTATUS1"},
+ {I40E_VFPE_CQPDB1, 0, 0, 0, 0, "VFPE_CQPDB1"},
+ {I40E_GLPE_VFUDACTRL(0), 31, 4, 0, 0, "GLPE_VFUDACTRL"},
+ {I40E_VFPE_WQEALLOC1, 0, 0, 0, 0, "VFPE_WQEALLOC1"},
+ {I40E_GLPE_VFUDAUCFBQPN(0), 31, 4, 0, 0, "GLPE_VFUDAUCFBQPN"},
+ {I40E_GLPE_VFFLMXMITALLOCERR(0), 31, 4, 0, 0, "GLPE_VFFLMXMITALLOCERR"},
+ {I40E_GLPE_VFFLMQ1ALLOCERR(0), 31, 4, 0, 0, "GLPE_VFFLMQ1ALLOCERR"},
+ {I40E_VFQF_HLUT(0), 15, 4, 0, 0, "VFQF_HLUT"},
+ {I40E_GLPE_CPUSTATUS0, 0, 0, 0, 0, "GLPE_CPUSTATUS0"},
+ {I40E_GLPE_CPUSTATUS1, 0, 0, 0, 0, "GLPE_CPUSTATUS1"},
+ {I40E_GLPE_CPUSTATUS2, 0, 0, 0, 0, "GLPE_CPUSTATUS2"},
+ {I40E_GLPE_CPUTRIG0, 0, 0, 0, 0, "GLPE_CPUTRIG0"},
+ {I40E_GLPE_VFFLMOBJCTRL(0), 31, 4, 0, 0, "GLPE_VFFLMOBJCTRL"},
+ {I40E_VFCM_PE_ERRINFO, 0, 0, 0, 0, "VFCM_PE_ERRINFO"},
+ {I40E_GLPE_RUPM_GCTL, 0, 0, 0, 0, "GLPE_RUPM_GCTL"},
+ {I40E_GLPE_DUAL40_RUPM, 0, 0, 0, 0, "GLPE_DUAL40_RUPM"},
+ {I40E_GLPE_RUPM_TXHOST_EN, 0, 0, 0, 0, "GLPE_RUPM_TXHOST_EN"},
+ {I40E_PRTPE_RUPM_THRES, 0, 0, 0, 0, "PRTPE_RUPM_THRES"},
+ {I40E_PRTPE_RUPM_CTL, 0, 0, 0, 0, "PRTPE_RUPM_CTL"},
+ {I40E_PRTPE_RUPM_PFCCTL, 0, 0, 0, 0, "PRTPE_RUPM_PFCCTL"},
+ {I40E_PRTPE_RUPM_PFCPC, 0, 0, 0, 0, "PRTPE_RUPM_PFCPC"},
+ {I40E_PRTPE_RUPM_PFCTCC, 0, 0, 0, 0, "PRTPE_RUPM_PFCTCC"},
+ {I40E_GLPE_RUPM_PUSHPOOL, 0, 0, 0, 0, "GLPE_RUPM_PUSHPOOL"},
+ {I40E_GLPE_RUPM_FLRPOOL, 0, 0, 0, 0, "GLPE_RUPM_FLRPOOL"},
+ {I40E_GLPE_RUPM_PTXPOOL, 0, 0, 0, 0, "GLPE_RUPM_PTXPOOL"},
+ {I40E_GLPE_RUPM_CQPPOOL, 0, 0, 0, 0, "GLPE_RUPM_CQPPOOL"},
+ {I40E_PRTE_RUPM_TCCNTR03, 0, 0, 0, 0, "PRTE_RUPM_TCCNTR03"},
+ {I40E_PRTPE_RUPM_TCCNTR47, 0, 0, 0, 0, "PRTPE_RUPM_TCCNTR47"},
+ {I40E_PRTPE_RUPM_CNTR, 0, 0, 0, 0, "PRTPE_RUPM_CNTR"},
+ {I40E_PRTPE_RUPM_PTXTCCNTR03, 0, 0, 0, 0, "PRTPE_RUPM_PTXTCCNTR03"},
+ {I40E_PRTPE_RUPM_PTCTCCNTR47, 0, 0, 0, 0, "PRTPE_RUPM_PTCTCCNTR47"},
+ {I40E_VFCM_PE_ERRDATA, 0, 0, 0, 0, "VFCM_PE_ERRDATA"},
+ {I40E_PFPCI_VF_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_VF_FLUSH_DONE"},
+ {I40E_GLPES_PFRXVLANERR(0), 15, 4, 0, 0, "GLPES_PFRXVLANERR"},
+ {I40E_GLPES_PFIP4RXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXOCTSLO"},
+ {I40E_GLPES_PFIP4RXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXOCTSHI"},
+ {I40E_GLPES_PFIP4RXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXPKTSLO"},
+ {I40E_GLPES_PFIP4RXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXPKTSHI"},
+ {I40E_GLPES_PFIP4RXDISCARD(0), 15, 4, 0, 0, "GLPES_PFIP4RXDISCARD"},
+ {I40E_GLPES_PFIP4RXTRUNC(0), 15, 4, 0, 0, "GLPES_PFIP4RXTRUNC"},
+ {I40E_GLPES_PFIP4RXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXFRAGSLO"},
+ {I40E_GLPES_PFIP4RXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXFRAGSHI"},
+ {I40E_GLPES_PFIP4RXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCOCTSLO"},
+ {I40E_GLPES_PFIP4RXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCOCTSHI"},
+ {I40E_GLPES_PFIP4RXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCPKTSLO"},
+ {I40E_GLPES_PFIP4RXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCPKTSHI"},
+ {I40E_GLPES_PFIP6RXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXOCTSLO"},
+ {I40E_GLPES_PFIP6RXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXOCTSHI"},
+ {I40E_GLPES_PFIP6RXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXPKTSLO"},
+ {I40E_GLPES_PFIP6RXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXPKTSHI"},
+ {I40E_GLPES_PFIP6RXDISCARD(0), 15, 4, 0, 0, "GLPES_PFIP6RXDISCARD"},
+ {I40E_GLPES_PFIP6RXTRUNC(0), 15, 4, 0, 0, "GLPES_PFIP6RXTRUNC"},
+ {I40E_GLPES_PFIP6RXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXFRAGSLO"},
+ {I40E_GLPES_PFIP6RXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXFRAGSHI"},
+ {I40E_GLPES_PFIP6RXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCOCTSLO"},
+ {I40E_GLPES_PFIP6RXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCOCTSHI"},
+ {I40E_GLPES_PFIP6RXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCPKTSLO"},
+ {I40E_GLPES_PFIP6RXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCPKTSHI"},
+ {I40E_GLPES_PFIP4TXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXOCTSLO"},
+ {I40E_GLPES_PFIP4TXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXOCTSHI"},
+ {I40E_GLPES_PFIP4TXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXPKTSLO"},
+ {I40E_GLPES_PFIP4TXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXPKTSHI"},
+ {I40E_GLPES_PFIP4TXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXFRAGSLO"},
+ {I40E_GLPES_PFIP4TXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXFRAGSHI"},
+ {I40E_GLPES_PFIP4TXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCOCTSLO"},
+ {I40E_GLPES_PFIP4TXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCOCTSHI"},
+ {I40E_GLPES_PFIP4TXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCPKTSLO"},
+ {I40E_GLPES_PFIP4TXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCPKTSHI"},
+ {I40E_GLPES_PFIP6TXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXOCTSLO"},
+ {I40E_GLPES_PFIP6TXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXOCTSHI"},
+ {I40E_GLPES_PFIP6TXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXPKTSLO"},
+ {I40E_GLPES_PFIP6TXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXPKTSHI"},
+ {I40E_GLPES_PFIP6TXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXFRAGSLO"},
+ {I40E_GLPES_PFIP6TXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXFRAGSHI"},
+ {I40E_GLPES_PFIP6TXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCOCTSLO"},
+ {I40E_GLPES_PFIP6TXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCOCTSHI"},
+ {I40E_GLPES_PFIP6TXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCPKTSLO"},
+ {I40E_GLPES_PFIP6TXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCPKTSHI"},
+ {I40E_GLPES_PFIP4TXNOROUTE(0), 15, 4, 0, 0, "GLPES_PFIP4TXNOROUTE"},
+ {I40E_GLPES_PFIP6TXNOROUTE(0), 15, 4, 0, 0, "GLPES_PFIP6TXNOROUTE"},
+ {I40E_GLPES_PFTCPRXSEGSLO(0), 15, 8, 0, 0, "GLPES_PFTCPRXSEGSLO"},
+ {I40E_GLPES_PFTCPRXSEGSHI(0), 15, 8, 0, 0, "GLPES_PFTCPRXSEGSHI"},
+ {I40E_GLPES_PFTCPRXOPTERR(0), 15, 4, 0, 0, "GLPES_PFTCPRXOPTERR"},
+ {I40E_GLPES_PFTCPRXPROTOERR(0), 15, 4, 0, 0, "GLPES_PFTCPRXPROTOERR"},
+ {I40E_GLPES_PFTCPTXSEGLO(0), 15, 8, 0, 0, "GLPES_PFTCPTXSEGLO"},
+ {I40E_GLPES_PFTCPTXSEGHI(0), 15, 8, 0, 0, "GLPES_PFTCPTXSEGHI"},
+ {I40E_GLPES_PFTCPRTXSEG(0), 15, 4, 0, 0, "GLPES_PFTCPRTXSEG"},
+ {I40E_GLPES_PFUDPRXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFUDPRXPKTSLO"},
+ {I40E_GLPES_PFUDPRXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFUDPRXPKTSHI"},
+ {I40E_GLPES_PFUDPTXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFUDPTXPKTSLO"},
+ {I40E_GLPES_PFUDPTXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFUDPTXPKTSHI"},
+ {I40E_GLPES_PFRDMARXWRSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXWRSLO"},
+ {I40E_GLPES_PFRDMARXWRSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXWRSHI"},
+ {I40E_GLPES_PFRDMARXRDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXRDSLO"},
+ {I40E_GLPES_PFRDMARXRDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXRDSHI"},
+ {I40E_GLPES_PFRDMARXSNDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXSNDSLO"},
+ {I40E_GLPES_PFRDMARXSNDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXSNDSHI"},
+ {I40E_GLPES_PFRDMATXWRSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXWRSLO"},
+ {I40E_GLPES_PFRDMATXWRSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXWRSHI"},
+ {I40E_GLPES_PFRDMATXRDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXRDSLO"},
+ {I40E_GLPES_PFRDMATXRDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXRDSHI"},
+ {I40E_GLPES_PFRDMATXSNDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXSNDSLO"},
+ {I40E_GLPES_PFRDMATXSNDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXSNDSHI"},
+ {I40E_GLPES_PFRDMAVBNDLO(0), 15, 8, 0, 0, "GLPES_PFRDMAVBNDLO"},
+ {I40E_GLPES_PFRDMAVBNDHI(0), 15, 8, 0, 0, "GLPES_PFRDMAVBNDHI"},
+ {I40E_GLPES_PFRDMAVINVLO(0), 15, 8, 0, 0, "GLPES_PFRDMAVINVLO"},
+ {I40E_GLPES_PFRDMAVINVHI(0), 15, 8, 0, 0, "GLPES_PFRDMAVINVHI"},
+ {I40E_GLPES_VFRXVLANERR(0), 31, 4, 0, 0, "GLPES_VFRXVLANERR"},
+ {I40E_GLPES_VFIP4RXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXOCTSLO"},
+ {I40E_GLPES_VFIP4RXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXOCTSHI"},
+ {I40E_GLPES_VFIP4RXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXPKTSLO"},
+ {I40E_GLPES_VFIP4RXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXPKTSHI"},
+ {I40E_GLPES_VFIP4RXDISCARD(0), 31, 4, 0, 0, "GLPES_VFIP4RXDISCARD"},
+ {I40E_GLPES_VFIP4RXTRUNC(0), 31, 4, 0, 0, "GLPES_VFIP4RXTRUNC"},
+ {I40E_GLPES_VFIP4RXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXFRAGSLO"},
+ {I40E_GLPES_VFIP4RXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXFRAGSHI"},
+ {I40E_GLPES_VFIP4RXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCOCTSLO"},
+ {I40E_GLPES_VFIP4RXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCOCTSHI"},
+ {I40E_GLPES_VFIP4RXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCPKTSLO"},
+ {I40E_GLPES_VFIP4RXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCPKTSHI"},
+ {I40E_GLPES_VFIP6RXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXOCTSLO"},
+ {I40E_GLPES_VFIP6RXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXOCTSHI"},
+ {I40E_GLPES_VFIP6RXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXPKTSLO"},
+ {I40E_GLPES_VFIP6RXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXPKTSHI"},
+ {I40E_GLPES_VFIP6RXDISCARD(0), 31, 4, 0, 0, "GLPES_VFIP6RXDISCARD"},
+ {I40E_GLPES_VFIP6RXTRUNC(0), 31, 4, 0, 0, "GLPES_VFIP6RXTRUNC"},
+ {I40E_GLPES_VFIP6RXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXFRAGSLO"},
+ {I40E_GLPES_VFIP6RXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXFRAGSHI"},
+ {I40E_GLPES_VFIP6RXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCOCTSLO"},
+ {I40E_GLPES_VFIP6RXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCOCTSHI"},
+ {I40E_GLPES_VFIP6RXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCPKTSLO"},
+ {I40E_GLPES_VFIP6RXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCPKTSHI"},
+ {I40E_GLPES_VFIP4TXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXOCTSLO"},
+ {I40E_GLPES_VFIP4TXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXOCTSHI"},
+ {I40E_GLPES_VFIP4TXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXPKTSLO"},
+ {I40E_GLPES_VFIP4TXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXPKTSHI"},
+ {I40E_GLPES_VFIP4TXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXFRAGSLO"},
+ {I40E_GLPES_VFIP4TXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXFRAGSHI"},
+ {I40E_GLPES_VFIP4TXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCOCTSLO"},
+ {I40E_GLPES_VFIP4TXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCOCTSHI"},
+ {I40E_GLPES_VFIP4TXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCPKTSLO"},
+ {I40E_GLPES_VFIP4TXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCPKTSHI"},
+ {I40E_GLPES_VFIP6TXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXOCTSLO"},
+ {I40E_GLPES_VFIP6TXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXOCTSHI"},
+ {I40E_GLPES_VFIP6TXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXPKTSLO"},
+ {I40E_GLPES_VFIP6TXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXPKTSHI"},
+ {I40E_GLPES_VFIP6TXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXFRAGSLO"},
+ {I40E_GLPES_VFIP6TXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXFRAGSHI"},
+ {I40E_GLPES_VFIP6TXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCOCTSLO"},
+ {I40E_GLPES_VFIP6TXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCOCTSHI"},
+ {I40E_GLPES_VFIP6TXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCPKTSLO"},
+ {I40E_GLPES_VFIP6TXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCPKTSHI"},
+ {I40E_GLPES_VFIP4TXNOROUTE(0), 31, 4, 0, 0, "GLPES_VFIP4TXNOROUTE"},
+ {I40E_GLPES_VFIP6TXNOROUTE(0), 31, 4, 0, 0, "GLPES_VFIP6TXNOROUTE"},
+ {I40E_GLPES_VFTCPRXSEGSLO(0), 31, 8, 0, 0, "GLPES_VFTCPRXSEGSLO"},
+ {I40E_GLPES_VFTCPRXSEGSHI(0), 31, 8, 0, 0, "GLPES_VFTCPRXSEGSHI"},
+ {I40E_GLPES_VFTCPRXOPTERR(0), 31, 4, 0, 0, "GLPES_VFTCPRXOPTERR"},
+ {I40E_GLPES_VFTCPRXPROTOERR(0), 31, 4, 0, 0, "GLPES_VFTCPRXPROTOERR"},
+ {I40E_GLPES_VFTCPTXSEGLO(0), 31, 8, 0, 0, "GLPES_VFTCPTXSEGLO"},
+ {I40E_GLPES_VFTCPTXSEGHI(0), 31, 8, 0, 0, "GLPES_VFTCPTXSEGHI"},
+ {I40E_GLPES_VFTCPRTXSEG(0), 31, 4, 0, 0, "GLPES_VFTCPRTXSEG"},
+ {I40E_GLPES_VFUDPRXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFUDPRXPKTSLO"},
+ {I40E_GLPES_VFUDPRXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFUDPRXPKTSHI"},
+ {I40E_GLPES_VFUDPTXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFUDPTXPKTSLO"},
+ {I40E_GLPES_VFUDPTXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFUDPTXPKTSHI"},
+ {I40E_GLPES_VFRDMARXWRSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXWRSLO"},
+ {I40E_GLPES_VFRDMARXWRSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXWRSHI"},
+ {I40E_GLPES_VFRDMARXRDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXRDSLO"},
+ {I40E_GLPES_VFRDMARXRDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXRDSHI"},
+ {I40E_GLPES_VFRDMARXSNDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXSNDSLO"},
+ {I40E_GLPES_VFRDMARXSNDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXSNDSHI"},
+ {I40E_GLPES_VFRDMATXWRSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXWRSLO"},
+ {I40E_GLPES_VFRDMATXWRSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXWRSHI"},
+ {I40E_GLPES_VFRDMATXRDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXRDSLO"},
+ {I40E_GLPES_VFRDMATXRDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXRDSHI"},
+ {I40E_GLPES_VFRDMATXSNDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXSNDSLO"},
+ {I40E_GLPES_VFRDMATXSNDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXSNDSHI"},
+ {I40E_GLPES_VFRDMAVBNDLO(0), 31, 8, 0, 0, "GLPES_VFRDMAVBNDLO"},
+ {I40E_GLPES_VFRDMAVBNDHI(0), 31, 8, 0, 0, "GLPES_VFRDMAVBNDHI"},
+ {I40E_GLPES_VFRDMAVINVLO(0), 31, 8, 0, 0, "GLPES_VFRDMAVINVLO"},
+ {I40E_GLPES_VFRDMAVINVHI(0), 31, 8, 0, 0, "GLPES_VFRDMAVINVHI"},
+ {I40E_GLPES_RDMARXUNALIGN, 0, 0, 0, 0, "GLPES_RDMARXUNALIGN"},
+ {I40E_GLPES_RDMARXOOONOMARK, 0, 0, 0, 0, "GLPES_RDMARXOOONOMARK"},
+ {I40E_GLPES_RDMARXMULTFPDUSLO, 0, 0, 0, 0, "GLPES_RDMARXMULTFPDUSLO"},
+ {I40E_GLPES_RDMARXMULTFPDUSHI, 0, 0, 0, 0, "GLPES_RDMARXMULTFPDUSHI"},
+ {I40E_GLPES_RDMARXOOODDPLO, 0, 0, 0, 0, "GLPES_RDMARXOOODDPLO"},
+ {I40E_GLPES_RDMARXOOODDPHI, 0, 0, 0, 0, "GLPES_RDMARXOOODDPHI"},
+ {I40E_GLPES_TCPRXPUREACKSLO, 0, 0, 0, 0, "GLPES_TCPRXPUREACKSLO"},
+ {I40E_GLPES_TCPRXPUREACKHI, 0, 0, 0, 0, "GLPES_TCPRXPUREACKHI"},
+ {I40E_GLPES_TCPRXONEHOLELO, 0, 0, 0, 0, "GLPES_TCPRXONEHOLELO"},
+ {I40E_GLPES_TCPRXONEHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXONEHOLEHI"},
+ {I40E_GLPES_TCPRXTWOHOLELO, 0, 0, 0, 0, "GLPES_TCPRXTWOHOLELO"},
+ {I40E_GLPES_TCPRXTWOHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXTWOHOLEHI"},
+ {I40E_GLPES_TCPRXTHREEHOLELO, 0, 0, 0, 0, "GLPES_TCPRXTHREEHOLELO"},
+ {I40E_GLPES_TCPRXTHREEHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXTHREEHOLEHI"},
+ {I40E_GLPES_TCPRXFOURHOLELO, 0, 0, 0, 0, "GLPES_TCPRXFOURHOLELO"},
+ {I40E_GLPES_TCPRXFOURHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXFOURHOLEHI"},
+ {I40E_GLPES_TCPTXRETRANSFASTLO, 0, 0, 0, 0, "GLPES_TCPTXRETRANSFASTLO"},
+ {I40E_GLPES_TCPTXRETRANSFASTHI, 0, 0, 0, 0, "GLPES_TCPTXRETRANSFASTHI"},
+ {I40E_GLPES_TCPTXTOUTSFASTLO, 0, 0, 0, 0, "GLPES_TCPTXTOUTSFASTLO"},
+ {I40E_GLPES_TCPTXTOUTSFASTHI, 0, 0, 0, 0, "GLPES_TCPTXTOUTSFASTHI"},
+ {I40E_GLPES_TCPTXTOUTSLO, 0, 0, 0, 0, "GLPES_TCPTXTOUTSLO"},
+ {I40E_GLPES_TCPTXTOUTSHI, 0, 0, 0, 0, "GLPES_TCPTXTOUTSHI"},
+ {I40E_PRTDCB_TCMSTC_RLPM(0), 7, 32, 0, 0, "PRTDCB_TCMSTC_RLPM"},
+ {I40E_PRTDCB_RLPMC, 0, 0, 0, 0, "PRTDCB_RLPMC"},
+ {I40E_PRTDCB_TCPMC_RLPM, 0, 0, 0, 0, "PRTDCB_TCPMC_RLPM"},
+ {I40E_VFINT_ITRN(0, 0), 2, 2048, 511, 4, "VFINT_ITRN"},
+ {I40E_VFINT_DYN_CTLN(0), 511, 4, 0, 0, "VFINT_DYN_CTLN"},
+ {I40E_VPINT_LNKLSTN(0), 511, 4, 0, 0, "VPINT_LNKLSTN"},
+ {I40E_VPINT_RATEN(0), 511, 4, 0, 0, "VPINT_RATEN"},
+ {I40E_VPINT_CEQCTL(0), 511, 4, 0, 0, "VPINT_CEQCTL"},
+ {I40E_VFINT_ITR0(0, 0), 2, 1024, 127, 4, "VFINT_ITR0"},
+ {I40E_VFINT_STAT_CTL0(0), 127, 4, 0, 0, "VFINT_STAT_CTL0"},
+ {I40E_VFINT_DYN_CTL0(0), 127, 4, 0, 0, "VFINT_DYN_CTL0"},
+ {I40E_VPINT_LNKLST0(0), 127, 4, 0, 0, "VPINT_LNKLST0"},
+ {I40E_VPINT_RATE0(0), 127, 4, 0, 0, "VPINT_RATE0"},
+ {I40E_VPINT_AEQCTL(0), 127, 4, 0, 0, "VPINT_AEQCTL"},
+ {I40E_VFINT_ICR0(0), 127, 4, 0, 0, "VFINT_ICR0"},
+ {I40E_VFINT_ICR0_ENA(0), 127, 4, 0, 0, "VFINT_ICR0_ENA"},
+ {I40E_PFINT_ITRN(0, 0), 2, 2048, 511, 4, "PFINT_ITRN"},
+ {I40E_PFINT_DYN_CTLN(0), 511, 4, 0, 0, "PFINT_DYN_CTLN"},
+ {I40E_PFINT_LNKLSTN(0), 511, 4, 0, 0, "PFINT_LNKLSTN"},
+ {I40E_PFINT_RATEN(0), 511, 4, 0, 0, "PFINT_RATEN"},
+ {I40E_PFINT_CEQCTL(0), 511, 4, 0, 0, "PFINT_CEQCTL"},
+ {I40E_PFINT_ITR0(0), 2, 128, 0, 0, "PFINT_ITR0"},
+ {I40E_PFINT_STAT_CTL0, 0, 0, 0, 0, "PFINT_STAT_CTL0"},
+ {I40E_PFINT_DYN_CTL0, 0, 0, 0, 0, "PFINT_DYN_CTL0"},
+ {I40E_PFINT_LNKLST0, 0, 0, 0, 0, "PFINT_LNKLST0"},
+ {I40E_PFINT_RATE0, 0, 0, 0, 0, "PFINT_RATE0"},
+ {I40E_PFINT_AEQCTL, 0, 0, 0, 0, "PFINT_AEQCTL"},
+ {I40E_PFINT_ICR0, 0, 0, 0, 0, "PFINT_ICR0"},
+ {I40E_PFINT_ICR0_ENA, 0, 0, 0, 0, "PFINT_ICR0_ENA"},
+ {I40E_QINT_RQCTL(0), 1535, 4, 0, 0, "QINT_RQCTL"},
+ {I40E_QINT_TQCTL(0), 1535, 4, 0, 0, "QINT_TQCTL"},
+ {I40E_PFGEN_PORTMDIO_NUM, 0, 0, 0, 0, "PFGEN_PORTMDIO_NUM"},
+ {I40E_GLINT_CTL, 0, 0, 0, 0, "GLINT_CTL"},
+ {I40E_GLLAN_TSOMSK_F, 0, 0, 0, 0, "GLLAN_TSOMSK_F"},
+ {I40E_GLLAN_TSOMSK_M, 0, 0, 0, 0, "GLLAN_TSOMSK_M"},
+ {I40E_GLLAN_TSOMSK_L, 0, 0, 0, 0, "GLLAN_TSOMSK_L"},
+ {I40E_GL_RDPU_CNTRL, 0, 0, 0, 0, "GL_RDPU_CNTRL"},
+ {I40E_PFPM_FHFT_LENGTH(0), 7, 128, 0, 0, "PFPM_FHFT_LENGTH"},
+ {I40E_PFPM_WUC, 0, 0, 0, 0, "PFPM_WUC"},
+ {I40E_PFPM_WUFC, 0, 0, 0, 0, "PFPM_WUFC"},
+ {I40E_PFPM_WUS, 0, 0, 0, 0, "PFPM_WUS"},
+ {I40E_PRTPM_FHFHR, 0, 0, 0, 0, "PRTPM_FHFHR"},
+ {I40E_GLPM_WUMC, 0, 0, 0, 0, "GLPM_WUMC"},
+ {I40E_VPLAN_QTABLE(0, 0), 15, 1024, 127, 4, "VPLAN_QTABLE"},
+ {I40E_VPLAN_MAPENA(0), 127, 4, 0, 0, "VPLAN_MAPENA"},
+ {I40E_VFGEN_RSTAT1(0), 127, 4, 0, 0, "VFGEN_RSTAT1"},
+ {I40E_VPLAN_QBASE(0), 127, 4, 0, 0, "VPLAN_QBASE"},
+ {I40E_PF_ATQBAL, 0, 0, 0, 0, "PF_ATQBAL"},
+ {I40E_GL_ATQBAL, 0, 0, 0, 0, "GL_ATQBAL"},
+ {I40E_PF_ARQBAL, 0, 0, 0, 0, "PF_ARQBAL"},
+ {I40E_GL_ARQBAL, 0, 0, 0, 0, "GL_ARQBAL"},
+ {I40E_PF_ATQBAH, 0, 0, 0, 0, "PF_ATQBAH"},
+ {I40E_GL_ATQBAH, 0, 0, 0, 0, "GL_ATQBAH"},
+ {I40E_PF_ARQBAH, 0, 0, 0, 0, "PF_ARQBAH"},
+ {I40E_GL_ARQBAH, 0, 0, 0, 0, "GL_ARQBAH"},
+ {I40E_PF_ATQLEN, 0, 0, 0, 0, "PF_ATQLEN"},
+ {I40E_GL_ATQLEN, 0, 0, 0, 0, "GL_ATQLEN"},
+ {I40E_PF_ARQLEN, 0, 0, 0, 0, "PF_ARQLEN"},
+ {I40E_PF_ATQH, 0, 0, 0, 0, "PF_ATQH"},
+ {I40E_GL_ATQH, 0, 0, 0, 0, "GL_ATQH"},
+ {I40E_PF_ARQH, 0, 0, 0, 0, "PF_ARQH"},
+ {I40E_GL_ARQH, 0, 0, 0, 0, "GL_ARQH"},
+ {I40E_PF_ATQT, 0, 0, 0, 0, "PF_ATQT"},
+ {I40E_GL_ATQT, 0, 0, 0, 0, "GL_ATQT"},
+ {I40E_PF_ARQT, 0, 0, 0, 0, "PF_ARQT"},
+ {I40E_GL_ARQT, 0, 0, 0, 0, "GL_ARQT"},
+ {I40E_VF_ATQBAL(0), 127, 4, 0, 0, "VF_ATQBAL"},
+ {I40E_VF_ARQBAL(0), 127, 4, 0, 0, "VF_ARQBAL"},
+ {I40E_VF_ATQBAH(0), 127, 4, 0, 0, "VF_ATQBAH"},
+ {I40E_VF_ARQBAH(0), 127, 4, 0, 0, "VF_ARQBAH"},
+ {I40E_VF_ATQLEN(0), 127, 4, 0, 0, "VF_ATQLEN"},
+ {I40E_VF_ARQLEN(0), 127, 4, 0, 0, "VF_ARQLEN"},
+ {I40E_VF_ATQH(0), 127, 4, 0, 0, "VF_ATQH"},
+ {I40E_VF_ARQH(0), 127, 4, 0, 0, "VF_ARQH"},
+ {I40E_VF_ATQT(0), 127, 4, 0, 0, "VF_ATQT"},
+ {I40E_VF_ARQT(0), 127, 4, 0, 0, "VF_ARQT"},
+ {I40E_PRTDCB_GENC, 0, 0, 0, 0, "PRTDCB_GENC"},
+ {I40E_PRTDCB_GENS, 0, 0, 0, 0, "PRTDCB_GENS"},
+ {I40E_GLDCB_GENC, 0, 0, 0, 0, "GLDCB_GENC"},
+ {I40E_GL_FWSTS, 0, 0, 0, 0, "GL_FWSTS"},
+ {I40E_GL_FWRESETCNT, 0, 0, 0, 0, "GL_FWRESETCNT"},
+ {I40E_GL_VF_CTRL_TX(0), 127, 4, 0, 0, "GL_VF_CTRL_TX"},
+ {I40E_GL_VF_CTRL_RX(0), 127, 4, 0, 0, "GL_VF_CTRL_RX"},
+ {I40E_PRTTSYN_CTL1, 0, 0, 0, 0, "PRTTSYN_CTL1"},
+ {I40E_PRTTSYN_RXTIME_H(0), 3, 32, 0, 0, "PRTTSYN_RXTIME_H"},
+ {I40E_PRTTSYN_RXTIME_L(0), 3, 32, 0, 0, "PRTTSYN_RXTIME_L"},
+ {I40E_PRTTSYN_STAT_1, 0, 0, 0, 0, "PRTTSYN_STAT_1"},
+ {I40E_PRT_MNG_FTFT_MASK(0), 7, 32, 0, 0, "PRT_MNG_FTFT_MASK"},
+ {I40E_PRT_MNG_FTFT_LENGTH, 0, 0, 0, 0, "PRT_MNG_FTFT_LENGTH"},
+ {I40E_PRT_MNG_FTFT_DATA(0), 31, 32, 0, 0, "PRT_MNG_FTFT_DATA"},
+ {I40E_GL_PPRS_SPARE, 0, 0, 0, 0, "GL_PPRS_SPARE"},
+ {I40E_PFGEN_STATE, 0, 0, 0, 0, "PFGEN_STATE"},
+ {I40E_PFINT_GPIO_ENA, 0, 0, 0, 0, "PFINT_GPIO_ENA"},
+ {I40E_GLGEN_MISC_SPARE, 0, 0, 0, 0, "GLGEN_MISC_SPARE"},
+ {I40E_GLGEN_GPIO_CTL(0), 29, 4, 0, 0, "GLGEN_GPIO_CTL"},
+ {I40E_GLGEN_LED_CTL, 0, 0, 0, 0, "GLGEN_LED_CTL"},
+ {I40E_GLGEN_GPIO_STAT, 0, 0, 0, 0, "GLGEN_GPIO_STAT"},
+ {I40E_GLGEN_GPIO_TRANSIT, 0, 0, 0, 0, "GLGEN_GPIO_TRANSIT"},
+ {I40E_GLGEN_GPIO_SET, 0, 0, 0, 0, "GLGEN_GPIO_SET"},
+ {I40E_EMPINT_GPIO_ENA, 0, 0, 0, 0, "EMPINT_GPIO_ENA"},
+ {I40E_GLGEN_MSCA(0), 3, 4, 0, 0, "GLGEN_MSCA"},
+ {I40E_GLGEN_MSRWD(0), 3, 4, 0, 0, "GLGEN_MSRWD"},
+ {I40E_GLGEN_I2CPARAMS(0), 3, 4, 0, 0, "GLGEN_I2CPARAMS"},
+ {I40E_GLVFGEN_TIMER, 0, 0, 0, 0, "GLVFGEN_TIMER"},
+ {I40E_GLGEN_MDIO_I2C_SEL(0), 3, 4, 0, 0, "GLGEN_MDIO_I2C_SEL"},
+ {I40E_GLGEN_MDIO_CTRL(0), 3, 4, 0, 0, "GLGEN_MDIO_CTRL"},
+ {I40E_GLGEN_I2CCMD(0), 3, 4, 0, 0, "GLGEN_I2CCMD"},
+ {I40E_PRTMAC_PCS_XAUI_SWAP_A, 0, 0, 0, 0, "PRTMAC_PCS_XAUI_SWAP_A"},
+ {I40E_PRTMAC_PCS_XAUI_SWAP_B, 0, 0, 0, 0, "PRTMAC_PCS_XAUI_SWAP_B"},
+ {I40E_VSIGEN_RTRIG(0), 383, 4, 0, 0, "VSIGEN_RTRIG"},
+ {I40E_VSIGEN_RSTAT(0), 383, 4, 0, 0, "VSIGEN_RSTAT"},
+ {I40E_VPGEN_VFRTRIG(0), 127, 4, 0, 0, "VPGEN_VFRTRIG"},
+ {I40E_VPGEN_VFRSTAT(0), 127, 4, 0, 0, "VPGEN_VFRSTAT"},
+ {I40E_PFGEN_CTRL, 0, 0, 0, 0, "PFGEN_CTRL"},
+ {I40E_PFGEN_DRUN, 0, 0, 0, 0, "PFGEN_DRUN"},
+ {I40E_GLGEN_VFLRSTAT(0), 3, 4, 0, 0, "GLGEN_VFLRSTAT"},
+ {I40E_GL_UFUSE, 0, 0, 0, 0, "GL_UFUSE"},
+ {I40E_GL_GP_FUSE(0), 28, 4, 0, 0, "GL_GP_FUSE"},
+ {I40E_PRTDCB_TETSC_TPB, 0, 0, 0, 0, "PRTDCB_TETSC_TPB"},
+ {I40E_PF_FUNC_RID, 0, 0, 0, 0, "PF_FUNC_RID"},
+ {I40E_PF_PCI_CIAA, 0, 0, 0, 0, "PF_PCI_CIAA"},
+ {I40E_PF_PCI_CIAD, 0, 0, 0, 0, "PF_PCI_CIAD"},
+ {I40E_PFPCI_FACTPS, 0, 0, 0, 0, "PFPCI_FACTPS"},
+ {I40E_PFPCI_ICAUSE, 0, 0, 0, 0, "PFPCI_ICAUSE"},
+ {I40E_PFPCI_IENA, 0, 0, 0, 0, "PFPCI_IENA"},
+ {I40E_PFPCI_VMINDEX, 0, 0, 0, 0, "PFPCI_VMINDEX"},
+ {I40E_PFPCI_VMPEND, 0, 0, 0, 0, "PFPCI_VMPEND"},
+ {I40E_GLPCI_DREVID, 0, 0, 0, 0, "GLPCI_DREVID"},
+ {I40E_GLPCI_BYTCTH, 0, 0, 0, 0, "GLPCI_BYTCTH"},
+ {I40E_GLPCI_BYTCTL, 0, 0, 0, 0, "GLPCI_BYTCTL"},
+ {I40E_GLPCI_GSCL_1, 0, 0, 0, 0, "GLPCI_GSCL_1"},
+ {I40E_GLPCI_GSCL_2, 0, 0, 0, 0, "GLPCI_GSCL_2"},
+ {I40E_GLPCI_GSCL_5_8(0), 3, 4, 0, 0, "GLPCI_GSCL_5_8"},
+ {I40E_GLPCI_GSCN_0_3(0), 3, 4, 0, 0, "GLPCI_GSCN_0_3"},
+ {I40E_GLPCI_PKTCT, 0, 0, 0, 0, "GLPCI_PKTCT"},
+ {I40E_GLPCI_PQ_MAX_USED_SPC, 0, 0, 0, 0, "GLPCI_PQ_MAX_USED_SPC"},
+ {I40E_GLPCI_PM_MUX_PFB, 0, 0, 0, 0, "GLPCI_PM_MUX_PFB"},
+ {I40E_GLPCI_PM_MUX_NPQ, 0, 0, 0, 0, "GLPCI_PM_MUX_NPQ"},
+ {I40E_GLPCI_SPARE_BITS_0, 0, 0, 0, 0, "GLPCI_SPARE_BITS_0"},
+ {I40E_GLPCI_SPARE_BITS_1, 0, 0, 0, 0, "GLPCI_SPARE_BITS_1"},
+ {I40E_GLPCI_CUR_RLAN_ALWD, 0, 0, 0, 0, "GLPCI_CUR_RLAN_ALWD"},
+ {I40E_GLPCI_CUR_TLAN_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TLAN_ALWD"},
+ {I40E_GLPCI_CUR_RXPE_ALWD, 0, 0, 0, 0, "GLPCI_CUR_RXPE_ALWD"},
+ {I40E_GLPCI_CUR_TXPE_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TXPE_ALWD"},
+ {I40E_GLPCI_CUR_PMAT_ALWD, 0, 0, 0, 0, "GLPCI_CUR_PMAT_ALWD"},
+ {I40E_GLPCI_CUR_MNG_ALWD, 0, 0, 0, 0, "GLPCI_CUR_MNG_ALWD"},
+ {I40E_GLPCI_CUR_TDPU_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TDPU_ALWD"},
+ {I40E_GLPCI_CUR_RLAN_RSVD, 0, 0, 0, 0, "GLPCI_CUR_RLAN_RSVD"},
+ {I40E_GLPCI_CUR_TLAN_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TLAN_RSVD"},
+ {I40E_GLPCI_CUR_RXPE_RSVD, 0, 0, 0, 0, "GLPCI_CUR_RXPE_RSVD"},
+ {I40E_GLPCI_CUR_TXPE_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TXPE_RSVD"},
+ {I40E_GLPCI_CUR_PMAT_RSVD, 0, 0, 0, 0, "GLPCI_CUR_PMAT_RSVD"},
+ {I40E_GLPCI_CUR_MNG_RSVD, 0, 0, 0, 0, "GLPCI_CUR_MNG_RSVD"},
+ {I40E_GLPCI_CUR_TDPU_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TDPU_RSVD"},
+ {I40E_PFPCI_VF_FLUSH_DONE1(0), 127, 4, 0, 0, "PFPCI_VF_FLUSH_DONE1"},
+ {I40E_PFPCI_PF_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_PF_FLUSH_DONE"},
+ {I40E_PFPCI_VM_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_VM_FLUSH_DONE"},
+ {I40E_GLPCI_NPQ_CFG, 0, 0, 0, 0, "GLPCI_NPQ_CFG"},
+ {I40E_GLPCI_CUR_CLNT_COMMON, 0, 0, 0, 0, "GLPCI_CUR_CLNT_COMMON"},
+ {I40E_GLPCI_CUR_CLNT_PIPEMON, 0, 0, 0, 0, "GLPCI_CUR_CLNT_PIPEMON"},
+ {I40E_GLPCI_CUR_WATMK_CLNT_COMMON, 0, 0, 0, 0,
+ "GLPCI_CUR_WATMK_CLNT_COMMON"},
+ {I40E_GLPCI_WATMK_CLNT_PIPEMON, 0, 0, 0, 0,
+ "GLPCI_WATMK_CLNT_PIPEMON"},
+ {I40E_GLPCI_WATMK_RLAN_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_RLAN_ALWD"},
+ {I40E_GLPCI_WATMK_TLAN_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TLAN_ALWD"},
+ {I40E_GLPCI_WATMK_RXPE_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_RXPE_ALWD"},
+ {I40E_GLPCI_WATMK_TXPE_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TXPE_ALWD"},
+ {I40E_GLPCI_WATMK_PMAT_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_PMAT_ALWD"},
+ {I40E_GLPCI_WATMK_MNG_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_MNG_ALWD"},
+ {I40E_GLPCI_WATMK_TPDU_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TPDU_ALWD"},
+ {I40E_PRTDCB_TCMSTC(0), 7, 32, 0, 0, "PRTDCB_TCMSTC"},
+ {I40E_PRTDCB_TFMSTC(0), 7, 32, 0, 0, "PRTDCB_TFMSTC"},
+ {I40E_PRTDCB_TDPMC, 0, 0, 0, 0, "PRTDCB_TDPMC"},
+ {I40E_PRTDCB_TCWSTC(0), 7, 32, 0, 0, "PRTDCB_TCWSTC"},
+ {I40E_PRTDCB_TCPMC, 0, 0, 0, 0, "PRTDCB_TCPMC"},
+ {I40E_GL_TUPM_SPARE, 0, 0, 0, 0, "GL_TUPM_SPARE"},
+ {I40E_GLPEOC_CACHESIZE, 0, 0, 0, 0, "GLPEOC_CACHESIZE"},
+ {I40E_GLPBLOC_CACHESIZE, 0, 0, 0, 0, "GLPBLOC_CACHESIZE"},
+ {I40E_GLFOC_CACHESIZE, 0, 0, 0, 0, "GLFOC_CACHESIZE"},
+ {I40E_PRTRPB_DHW(0), 7, 32, 0, 0, "PRTRPB_DHW"},
+ {I40E_PRTRPB_DLW(0), 7, 32, 0, 0, "PRTRPB_DLW"},
+ {I40E_PRTRPB_DPS(0), 7, 32, 0, 0, "PRTRPB_DPS"},
+ {I40E_PRTRPB_SHT(0), 7, 32, 0, 0, "PRTRPB_SHT"},
+ {I40E_PRTRPB_SHW, 0, 0, 0, 0, "PRTRPB_SHW"},
+ {I40E_PRTRPB_SLT(0), 7, 32, 0, 0, "PRTRPB_SLT"},
+ {I40E_PRTRPB_SLW, 0, 0, 0, 0, "PRTRPB_SLW"},
+ {I40E_PRTRPB_SPS, 0, 0, 0, 0, "PRTRPB_SPS"},
+ {I40E_GLRPB_DPSS, 0, 0, 0, 0, "GLRPB_DPSS"},
+ {I40E_GLRPB_GHW, 0, 0, 0, 0, "GLRPB_GHW"},
+ {I40E_GLRPB_GLW, 0, 0, 0, 0, "GLRPB_GLW"},
+ {I40E_GLRPB_PHW, 0, 0, 0, 0, "GLRPB_PHW"},
+ {I40E_GLRPB_PLW, 0, 0, 0, 0, "GLRPB_PLW"},
+ {I40E_PRTDCB_TETSC_TCB, 0, 0, 0, 0, "PRTDCB_TETSC_TCB"},
+ {I40E_GLNVM_ULD, 0, 0, 0, 0, "GLNVM_ULD"},
+ {I40E_GLNVM_PROTCSR(0), 59, 4, 0, 0, "GLNVM_PROTCSR"},
+ {I40E_GLNVM_GENS, 0, 0, 0, 0, "GLNVM_GENS"},
+ {I40E_GLNVM_FLASHID, 0, 0, 0, 0, "GLNVM_FLASHID"},
+ {I40E_GLNVM_FLA, 0, 0, 0, 0, "GLNVM_FLA"},
+ {I40E_GLNVM_SRCTL, 0, 0, 0, 0, "GLNVM_SRCTL"},
+ {I40E_GLNVM_SRDATA, 0, 0, 0, 0, "GLNVM_SRDATA"},
+ {I40E_GLGEN_STAT, 0, 0, 0, 0, "GLGEN_STAT"},
+ {I40E_GL_MNG_HWARB_CTRL, 0, 0, 0, 0, "GL_MNG_HWARB_CTRL"},
+ {I40E_GL_MNG_FWSM, 0, 0, 0, 0, "GL_MNG_FWSM"},
+ {I40E_GLNVM_ALTIMERS, 0, 0, 0, 0, "GLNVM_ALTIMERS"},
+ {I40E_GLNVM_ULT, 0, 0, 0, 0, "GLNVM_ULT"},
+ {I40E_MEM_INIT_DONE_STAT, 0, 0, 0, 0, "MEM_INIT_DONE_STAT"},
+ {I40E_GLNVM_AL_REQ, 0, 0, 0, 0, "GLNVM_AL_REQ"},
+ {I40E_MNGSB_MSGCTL, 0, 0, 0, 0, "MNGSB_MSGCTL"},
+ {I40E_MNGSB_RSPCTL, 0, 0, 0, 0, "MNGSB_RSPCTL"},
+ {I40E_MNGSB_DADD, 0, 0, 0, 0, "MNGSB_DADD"},
+ {I40E_MNGSB_DCNT, 0, 0, 0, 0, "MNGSB_DCNT"},
+ {I40E_MNGSB_FDCS, 0, 0, 0, 0, "MNGSB_FDCS"},
+ {I40E_MNGSB_FDS, 0, 0, 0, 0, "MNGSB_FDS"},
+ {I40E_MNGSB_FDCRC, 0, 0, 0, 0, "MNGSB_FDCRC"},
+ {I40E_MNGSB_WHDR0, 0, 0, 0, 0, "MNGSB_WHDR0"},
+ {I40E_MNGSB_WHDR1, 0, 0, 0, 0, "MNGSB_WHDR1"},
+ {I40E_MNGSB_WHDR2, 0, 0, 0, 0, "MNGSB_WHDR2"},
+ {I40E_MNGSB_WDATA, 0, 0, 0, 0, "MNGSB_WDATA"},
+ {I40E_MNGSB_RHDR0, 0, 0, 0, 0, "MNGSB_RHDR0"},
+ {I40E_MNGSB_RDATA, 0, 0, 0, 0, "MNGSB_RDATA"},
+ {I40E_PFPM_APM, 0, 0, 0, 0, "PFPM_APM"},
+ {I40E_PRTGEN_STATUS, 0, 0, 0, 0, "PRTGEN_STATUS"},
+ {I40E_PRTGEN_CNF, 0, 0, 0, 0, "PRTGEN_CNF"},
+ {I40E_PRTPM_GC, 0, 0, 0, 0, "PRTPM_GC"},
+ {I40E_PRTGEN_CNF2, 0, 0, 0, 0, "PRTGEN_CNF2"},
+ {I40E_GLGEN_RSTCTL, 0, 0, 0, 0, "GLGEN_RSTCTL"},
+ {I40E_GLGEN_CLKSTAT, 0, 0, 0, 0, "GLGEN_CLKSTAT"},
+ {I40E_GLGEN_RSTAT, 0, 0, 0, 0, "GLGEN_RSTAT"},
+ {I40E_GLGEN_RTRIG, 0, 0, 0, 0, "GLGEN_RTRIG"},
+ {I40E_GLGEN_PME_TO, 0, 0, 0, 0, "GLGEN_PME_TO"},
+ {I40E_GLGEN_CAR_DEBUG, 0, 0, 0, 0, "GLGEN_CAR_DEBUG"},
+ {I40E_PFPCI_CNF, 0, 0, 0, 0, "PFPCI_CNF"},
+ {I40E_PFPCI_DEVID, 0, 0, 0, 0, "PFPCI_DEVID"},
+ {I40E_PFPCI_SUBSYSID, 0, 0, 0, 0, "PFPCI_SUBSYSID"},
+ {I40E_PFPCI_FUNC2, 0, 0, 0, 0, "PFPCI_FUNC2"},
+ {I40E_PFPCI_FUNC, 0, 0, 0, 0, "PFPCI_FUNC"},
+ {I40E_PFPCI_STATUS1, 0, 0, 0, 0, "PFPCI_STATUS1"},
+ {I40E_PFPCI_PM, 0, 0, 0, 0, "PFPCI_PM"},
+ {I40E_PFPCI_CLASS, 0, 0, 0, 0, "PFPCI_CLASS"},
+ {I40E_GLTPH_CTRL, 0, 0, 0, 0, "GLTPH_CTRL"},
+ {I40E_GLPCI_LBARCTRL, 0, 0, 0, 0, "GLPCI_LBARCTRL"},
+ {I40E_GLPCI_SUBVENID, 0, 0, 0, 0, "GLPCI_SUBVENID"},
+ {I40E_GLPCI_PWRDATA, 0, 0, 0, 0, "GLPCI_PWRDATA"},
+ {I40E_GLPCI_CNF2, 0, 0, 0, 0, "GLPCI_CNF2"},
+ {I40E_GLPCI_SERL, 0, 0, 0, 0, "GLPCI_SERL"},
+ {I40E_GLPCI_SERH, 0, 0, 0, 0, "GLPCI_SERH"},
+ {I40E_GLPCI_CAPCTRL, 0, 0, 0, 0, "GLPCI_CAPCTRL"},
+ {I40E_GLPCI_CAPSUP, 0, 0, 0, 0, "GLPCI_CAPSUP"},
+ {I40E_GLPCI_LINKCAP, 0, 0, 0, 0, "GLPCI_LINKCAP"},
+ {I40E_GLPCI_PMSUP, 0, 0, 0, 0, "GLPCI_PMSUP"},
+ {I40E_GLPCI_REVID, 0, 0, 0, 0, "GLPCI_REVID"},
+ {I40E_GLPCI_VFSUP, 0, 0, 0, 0, "GLPCI_VFSUP"},
+ {I40E_GLPCI_CNF, 0, 0, 0, 0, "GLPCI_CNF"},
+ {I40E_GLPCI_UPADD, 0, 0, 0, 0, "GLPCI_UPADD"},
+ {I40E_GLPCI_PCIERR, 0, 0, 0, 0, "GLPCI_PCIERR"},
+ {I40E_GLPCI_VENDORID, 0, 0, 0, 0, "GLPCI_VENDORID"},
+ {I40E_GL_UFUSE_SOC, 0, 0, 0, 0, "GL_UFUSE_SOC"},
+ {I40E_PFHMC_SDCMD, 0, 0, 0, 0, "PFHMC_SDCMD"},
+ {I40E_PFHMC_SDDATALOW, 0, 0, 0, 0, "PFHMC_SDDATALOW"},
+ {I40E_PFHMC_SDDATAHIGH, 0, 0, 0, 0, "PFHMC_SDDATAHIGH"},
+ {I40E_PFHMC_PDINV, 0, 0, 0, 0, "PFHMC_PDINV"},
+ {I40E_PFHMC_ERRORINFO, 0, 0, 0, 0, "PFHMC_ERRORINFO"},
+ {I40E_PFHMC_ERRORDATA, 0, 0, 0, 0, "PFHMC_ERRORDATA"},
+ {I40E_GLHMC_SDPART(0), 15, 4, 0, 0, "GLHMC_SDPART"},
+ {I40E_GLHMC_PFPESDPART(0), 15, 4, 0, 0, "GLHMC_PFPESDPART"},
+ {I40E_GLHMC_PFASSIGN(0), 15, 4, 0, 0, "GLHMC_PFASSIGN"},
+ {I40E_GLHMC_LANTXOBJSZ, 0, 0, 0, 0, "GLHMC_LANTXOBJSZ"},
+ {I40E_GLHMC_LANQMAX, 0, 0, 0, 0, "GLHMC_LANQMAX"},
+ {I40E_GLHMC_LANRXOBJSZ, 0, 0, 0, 0, "GLHMC_LANRXOBJSZ"},
+ {I40E_GLHMC_FCOEDDPOBJSZ, 0, 0, 0, 0, "GLHMC_FCOEDDPOBJSZ"},
+ {I40E_GLHMC_FCOEMAX, 0, 0, 0, 0, "GLHMC_FCOEMAX"},
+ {I40E_GLHMC_FCOEFOBJSZ, 0, 0, 0, 0, "GLHMC_FCOEFOBJSZ"},
+ {I40E_GLHMC_PEQPOBJSZ, 0, 0, 0, 0, "GLHMC_PEQPOBJSZ"},
+ {I40E_GLHMC_PECQOBJSZ, 0, 0, 0, 0, "GLHMC_PECQOBJSZ"},
+ {I40E_GLHMC_PESRQOBJSZ, 0, 0, 0, 0, "GLHMC_PESRQOBJSZ"},
+ {I40E_GLHMC_PESRQMAX, 0, 0, 0, 0, "GLHMC_PESRQMAX"},
+ {I40E_GLHMC_PEHTEOBJSZ, 0, 0, 0, 0, "GLHMC_PEHTEOBJSZ"},
+ {I40E_GLHMC_PEHTMAX, 0, 0, 0, 0, "GLHMC_PEHTMAX"},
+ {I40E_GLHMC_PEARPOBJSZ, 0, 0, 0, 0, "GLHMC_PEARPOBJSZ"},
+ {I40E_GLHMC_PEARPMAX, 0, 0, 0, 0, "GLHMC_PEARPMAX"},
+ {I40E_GLHMC_PEMROBJSZ, 0, 0, 0, 0, "GLHMC_PEMROBJSZ"},
+ {I40E_GLHMC_PEMRMAX, 0, 0, 0, 0, "GLHMC_PEMRMAX"},
+ {I40E_GLHMC_PEXFOBJSZ, 0, 0, 0, 0, "GLHMC_PEXFOBJSZ"},
+ {I40E_GLHMC_PEXFMAX, 0, 0, 0, 0, "GLHMC_PEXFMAX"},
+ {I40E_GLHMC_PEXFFLMAX, 0, 0, 0, 0, "GLHMC_PEXFFLMAX"},
+ {I40E_GLHMC_PEQ1OBJSZ, 0, 0, 0, 0, "GLHMC_PEQ1OBJSZ"},
+ {I40E_GLHMC_PEQ1MAX, 0, 0, 0, 0, "GLHMC_PEQ1MAX"},
+ {I40E_GLHMC_PEQ1FLMAX, 0, 0, 0, 0, "GLHMC_PEQ1FLMAX"},
+ {I40E_GLHMC_FSIMCOBJSZ, 0, 0, 0, 0, "GLHMC_FSIMCOBJSZ"},
+ {I40E_GLHMC_FSIMCMAX, 0, 0, 0, 0, "GLHMC_FSIMCMAX"},
+ {I40E_GLHMC_FSIAVOBJSZ, 0, 0, 0, 0, "GLHMC_FSIAVOBJSZ"},
+ {I40E_GLHMC_FSIAVMAX, 0, 0, 0, 0, "GLHMC_FSIAVMAX"},
+ {I40E_GLHMC_PEPBLMAX, 0, 0, 0, 0, "GLHMC_PEPBLMAX"},
+ {I40E_GLHMC_PETIMEROBJSZ, 0, 0, 0, 0, "GLHMC_PETIMEROBJSZ"},
+ {I40E_GLHMC_PETIMERMAX, 0, 0, 0, 0, "GLHMC_PETIMERMAX"},
+ {I40E_GLHMC_FCOEFMAX, 0, 0, 0, 0, "GLHMC_FCOEFMAX"},
+ {I40E_GLHMC_PEPFFIRSTSD, 0, 0, 0, 0, "GLHMC_PEPFFIRSTSD"},
+ {I40E_GLHMC_DBQPMAX, 0, 0, 0, 0, "GLHMC_DBQPMAX"},
+ {I40E_GLHMC_DBCQMAX, 0, 0, 0, 0, "GLHMC_DBCQMAX"},
+ {I40E_GLHMC_PEQPBASE(0), 15, 4, 0, 0, "GLHMC_PEQPBASE"},
+ {I40E_GLHMC_PEQPCNT(0), 15, 4, 0, 0, "GLHMC_PEQPCNT"},
+ {I40E_GLHMC_PECQBASE(0), 15, 4, 0, 0, "GLHMC_PECQBASE"},
+ {I40E_GLHMC_PECQCNT(0), 15, 4, 0, 0, "GLHMC_PECQCNT"},
+ {I40E_GLHMC_PESRQBASE(0), 15, 4, 0, 0, "GLHMC_PESRQBASE"},
+ {I40E_GLHMC_PESRQCNT(0), 15, 4, 0, 0, "GLHMC_PESRQCNT"},
+ {I40E_GLHMC_PEHTEBASE(0), 15, 4, 0, 0, "GLHMC_PEHTEBASE"},
+ {I40E_GLHMC_PEHTCNT(0), 15, 4, 0, 0, "GLHMC_PEHTCNT"},
+ {I40E_GLHMC_PEARPBASE(0), 15, 4, 0, 0, "GLHMC_PEARPBASE"},
+ {I40E_GLHMC_PEARPCNT(0), 15, 4, 0, 0, "GLHMC_PEARPCNT"},
+ {I40E_GLHMC_APBVTINUSEBASE(0), 15, 4, 0, 0, "GLHMC_APBVTINUSEBASE"},
+ {I40E_GLHMC_PEMRBASE(0), 15, 4, 0, 0, "GLHMC_PEMRBASE"},
+ {I40E_GLHMC_PEMRCNT(0), 15, 4, 0, 0, "GLHMC_PEMRCNT"},
+ {I40E_GLHMC_PEXFBASE(0), 15, 4, 0, 0, "GLHMC_PEXFBASE"},
+ {I40E_GLHMC_PEXFCNT(0), 15, 4, 0, 0, "GLHMC_PEXFCNT"},
+ {I40E_GLHMC_PEXFFLBASE(0), 15, 4, 0, 0, "GLHMC_PEXFFLBASE"},
+ {I40E_GLHMC_PEQ1BASE(0), 15, 4, 0, 0, "GLHMC_PEQ1BASE"},
+ {I40E_GLHMC_PEQ1CNT(0), 15, 4, 0, 0, "GLHMC_PEQ1CNT"},
+ {I40E_GLHMC_PEQ1FLBASE(0), 15, 4, 0, 0, "GLHMC_PEQ1FLBASE"},
+ {I40E_GLHMC_FSIAVBASE(0), 15, 4, 0, 0, "GLHMC_FSIAVBASE"},
+ {I40E_GLHMC_FSIAVCNT(0), 15, 4, 0, 0, "GLHMC_FSIAVCNT"},
+ {I40E_GLHMC_PEPBLBASE(0), 15, 4, 0, 0, "GLHMC_PEPBLBASE"},
+ {I40E_GLHMC_PEPBLCNT(0), 15, 4, 0, 0, "GLHMC_PEPBLCNT"},
+ {I40E_GLHMC_PETIMERBASE(0), 15, 4, 0, 0, "GLHMC_PETIMERBASE"},
+ {I40E_GLHMC_PETIMERCNT(0), 15, 4, 0, 0, "GLHMC_PETIMERCNT"},
+ {I40E_GLHMC_FSIMCBASE(0), 15, 4, 0, 0, "GLHMC_FSIMCBASE"},
+ {I40E_GLHMC_FSIMCCNT(0), 15, 4, 0, 0, "GLHMC_FSIMCCNT"},
+ {I40E_GLHMC_LANTXBASE(0), 15, 4, 0, 0, "GLHMC_LANTXBASE"},
+ {I40E_GLHMC_LANTXCNT(0), 15, 4, 0, 0, "GLHMC_LANTXCNT"},
+ {I40E_GLHMC_LANRXBASE(0), 15, 4, 0, 0, "GLHMC_LANRXBASE"},
+ {I40E_GLHMC_LANRXCNT(0), 15, 4, 0, 0, "GLHMC_LANRXCNT"},
+ {I40E_GLHMC_FCOEDDPBASE(0), 15, 4, 0, 0, "GLHMC_FCOEDDPBASE"},
+ {I40E_GLHMC_FCOEDDPCNT(0), 15, 4, 0, 0, "GLHMC_FCOEDDPCNT"},
+ {I40E_GLHMC_FCOEFBASE(0), 15, 4, 0, 0, "GLHMC_FCOEFBASE"},
+ {I40E_GLHMC_FCOEFCNT(0), 15, 4, 0, 0, "GLHMC_FCOEFCNT"},
+ {I40E_GLHMC_VFPDINV(0), 31, 4, 0, 0, "GLHMC_VFPDINV"},
+ {I40E_GLHMC_VFSDPART(0), 31, 4, 0, 0, "GLHMC_VFSDPART"},
+ {I40E_GLHMC_VFPEQPBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQPBASE"},
+ {I40E_GLHMC_VFPEQPCNT(0), 31, 4, 0, 0, "GLHMC_VFPEQPCNT"},
+ {I40E_GLHMC_VFPECQBASE(0), 31, 4, 0, 0, "GLHMC_VFPECQBASE"},
+ {I40E_GLHMC_VFPECQCNT(0), 31, 4, 0, 0, "GLHMC_VFPECQCNT"},
+ {I40E_GLHMC_VFPESRQBASE(0), 31, 4, 0, 0, "GLHMC_VFPESRQBASE"},
+ {I40E_GLHMC_VFPESRQCNT(0), 31, 4, 0, 0, "GLHMC_VFPESRQCNT"},
+ {I40E_GLHMC_VFPEHTEBASE(0), 31, 4, 0, 0, "GLHMC_VFPEHTEBASE"},
+ {I40E_GLHMC_VFPEHTCNT(0), 31, 4, 0, 0, "GLHMC_VFPEHTCNT"},
+ {I40E_GLHMC_VFPEARPBASE(0), 31, 4, 0, 0, "GLHMC_VFPEARPBASE"},
+ {I40E_GLHMC_VFPEARPCNT(0), 31, 4, 0, 0, "GLHMC_VFPEARPCNT"},
+ {I40E_GLHMC_VFAPBVTINUSEBASE(0), 31, 4, 0, 0, "GLHMC_VFAPBVTINUSEBASE"},
+ {I40E_GLHMC_VFPEMRBASE(0), 31, 4, 0, 0, "GLHMC_VFPEMRBASE"},
+ {I40E_GLHMC_VFPEMRCNT(0), 31, 4, 0, 0, "GLHMC_VFPEMRCNT"},
+ {I40E_GLHMC_VFPEXFBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFBASE"},
+ {I40E_GLHMC_VFPEXFCNT(0), 31, 4, 0, 0, "GLHMC_VFPEXFCNT"},
+ {I40E_GLHMC_VFPEXFFLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFFLBASE"},
+ {I40E_GLHMC_VFPEQ1BASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1BASE"},
+ {I40E_GLHMC_VFPEQ1CNT(0), 31, 4, 0, 0, "GLHMC_VFPEQ1CNT"},
+ {I40E_GLHMC_VFPEQ1FLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1FLBASE"},
+ {I40E_GLHMC_VFFSIAVBASE(0), 31, 4, 0, 0, "GLHMC_VFFSIAVBASE"},
+ {I40E_GLHMC_VFFSIAVCNT(0), 31, 4, 0, 0, "GLHMC_VFFSIAVCNT"},
+ {I40E_GLHMC_VFPEPBLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEPBLBASE"},
+ {I40E_GLHMC_VFPEPBLCNT(0), 31, 4, 0, 0, "GLHMC_VFPEPBLCNT"},
+ {I40E_GLHMC_VFPETIMERBASE(0), 31, 4, 0, 0, "GLHMC_VFPETIMERBASE"},
+ {I40E_GLHMC_VFPETIMERCNT(0), 31, 4, 0, 0, "GLHMC_VFPETIMERCNT"},
+ {I40E_GLPDOC_CACHESIZE, 0, 0, 0, 0, "GLPDOC_CACHESIZE"},
+ {I40E_QTX_HEAD(0), 1535, 4, 0, 0, "QTX_HEAD"},
+ {I40E_VP_MDET_TX(0), 127, 4, 0, 0, "VP_MDET_TX"},
+ {I40E_PF_MDET_TX, 0, 0, 0, 0, "PF_MDET_TX"},
+ {I40E_GL_MDET_TX, 0, 0, 0, 0, "GL_MDET_TX"},
+ {I40E_GL_TLAN_SPARE, 0, 0, 0, 0, "GL_TLAN_SPARE"},
+ {I40E_GLLAN_TXPRE_QDIS(0), 11, 4, 0, 0, "GLLAN_TXPRE_QDIS"},
+ {I40E_QTX_ENA(0), 1535, 4, 0, 0, "QTX_ENA"},
+ {I40E_QTX_CTL(0), 1535, 4, 0, 0, "QTX_CTL"},
+ {I40E_QTX_TAIL(0), 1535, 4, 0, 0, "QTX_TAIL"},
+ {I40E_PFCM_LAN_ERRINFO, 0, 0, 0, 0, "PFCM_LAN_ERRINFO"},
+ {I40E_PFCM_LAN_ERRDATA, 0, 0, 0, 0, "PFCM_LAN_ERRDATA"},
+ {I40E_PFCM_LANCTXDATA(0), 3, 128, 0, 0, "PFCM_LANCTXDATA"},
+ {I40E_PFCM_LANCTXCTL, 0, 0, 0, 0, "PFCM_LANCTXCTL"},
+ {I40E_PFCM_LANCTXSTAT, 0, 0, 0, 0, "PFCM_LANCTXSTAT"},
+ {I40E_GLCM_LAN_CACHESIZE, 0, 0, 0, 0, "GLCM_LAN_CACHESIZE"},
+ {I40E_QRX_ENA(0), 1535, 4, 0, 0, "QRX_ENA"},
+ {I40E_PRTDCB_RETSTCC(0), 7, 32, 0, 0, "PRTDCB_RETSTCC"},
+ {I40E_PRTDCB_RPPMC, 0, 0, 0, 0, "PRTDCB_RPPMC"},
+ {I40E_PRTDCB_RETSC, 0, 0, 0, 0, "PRTDCB_RETSC"},
+ {I40E_PRTDCB_RUPTQ(0), 7, 32, 0, 0, "PRTDCB_RUPTQ"},
+ {I40E_GLDCB_RUPTI, 0, 0, 0, 0, "GLDCB_RUPTI"},
+ {I40E_QRX_TAIL(0), 1535, 4, 0, 0, "QRX_TAIL"},
+ {I40E_VP_MDET_RX(0), 127, 4, 0, 0, "VP_MDET_RX"},
+ {I40E_PF_MDET_RX, 0, 0, 0, 0, "PF_MDET_RX"},
+ {I40E_GLLAN_RCTL_0, 0, 0, 0, 0, "GLLAN_RCTL_0"},
+ {I40E_GL_MDET_RX, 0, 0, 0, 0, "GL_MDET_RX"},
+ {I40E_VFPE_CQARM(0), 127, 4, 0, 0, "VFPE_CQARM"},
+ {I40E_VFPE_CQACK(0), 127, 4, 0, 0, "VFPE_CQACK"},
+ {I40E_VFPE_AEQALLOC(0), 127, 4, 0, 0, "VFPE_AEQALLOC"},
+ {I40E_PFPE_CQARM, 0, 0, 0, 0, "PFPE_CQARM"},
+ {I40E_PFPE_CQACK, 0, 0, 0, 0, "PFPE_CQACK"},
+ {I40E_PFPE_AEQALLOC, 0, 0, 0, 0, "PFPE_AEQALLOC"},
+ {I40E_GLHMC_DBCQPART(0), 15, 4, 0, 0, "GLHMC_DBCQPART"},
+ {I40E_GLHMC_CEQPART(0), 15, 4, 0, 0, "GLHMC_CEQPART"},
+ {I40E_GLPE_PFCQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFCQEDROPCNT"},
+ {I40E_GLPE_PFCEQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFCEQEDROPCNT"},
+ {I40E_GLPE_PFAEQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFAEQEDROPCNT"},
+ {I40E_GLHMC_VFDBCQPART(0), 31, 4, 0, 0, "GLHMC_VFDBCQPART"},
+ {I40E_GLHMC_VFCEQPART(0), 31, 4, 0, 0, "GLHMC_VFCEQPART"},
+ {I40E_GLPE_VFCQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFCQEDROPCNT"},
+ {I40E_GLPE_VFCEQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFCEQEDROPCNT"},
+ {I40E_GLPE_VFAEQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFAEQEDROPCNT"},
+ {I40E_VFPE_WQEALLOC(0), 127, 4, 0, 0, "VFPE_WQEALLOC"},
+ {I40E_VFCM_PE_ERRINFO1(0), 127, 4, 0, 0, "VFCM_PE_ERRINFO1"},
+ {I40E_VFCM_PE_ERRDATA1(0), 127, 4, 0, 0, "VFCM_PE_ERRDATA1"},
+ {I40E_PFPE_WQEALLOC, 0, 0, 0, 0, "PFPE_WQEALLOC"},
+ {I40E_PFCM_PE_ERRINFO, 0, 0, 0, 0, "PFCM_PE_ERRINFO"},
+ {I40E_PFCM_PE_ERRDATA, 0, 0, 0, 0, "PFCM_PE_ERRDATA"},
+ {I40E_GLHMC_DBQPPART(0), 15, 4, 0, 0, "GLHMC_DBQPPART"},
+ {I40E_GLHMC_VFDBQPPART(0), 31, 4, 0, 0, "GLHMC_VFDBQPPART"},
+ {I40E_GLCM_PE_CACHESIZE, 0, 0, 0, 0, "GLCM_PE_CACHESIZE"},
+ {I40E_PFGEN_PORTNUM, 0, 0, 0, 0, "PFGEN_PORTNUM"},
+ {I40E_PF_VT_PFALLOC, 0, 0, 0, 0, "PF_VT_PFALLOC"},
+ {I40E_PRTDCB_TC2PFC, 0, 0, 0, 0, "PRTDCB_TC2PFC"},
+ {I40E_PRTDCB_RUP2TC, 0, 0, 0, 0, "PRTDCB_RUP2TC"},
+ {I40E_GLGEN_PCIFCNCNT, 0, 0, 0, 0, "GLGEN_PCIFCNCNT"},
+ {I40E_PRTDCB_RUP, 0, 0, 0, 0, "PRTDCB_RUP"},
+ {I40E_PRT_L2TAGSEN, 0, 0, 0, 0, "PRT_L2TAGSEN"},
+ {I40E_PRTGL_SAL, 0, 0, 0, 0, "PRTGL_SAL"},
+ {I40E_PRTGL_SAH, 0, 0, 0, 0, "PRTGL_SAH"},
+ {I40E_PRTDCB_MFLCN, 0, 0, 0, 0, "PRTDCB_MFLCN"},
+ {I40E_PRTMAC_LINK_DOWN_COUNTER, 0, 0, 0, 0,
+ "PRTMAC_LINK_DOWN_COUNTER"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE"},
+ {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE"},
+ {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_ENABLE_GCP"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1"},
+ {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2"},
+ {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_ENABLE_GPP"},
+ {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_ENABLE_PPP"},
+ {I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL"},
+ {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(0), 8, 16, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA"},
+ {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(0), 8, 16, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER"},
+ {I40E_PRTMAC_HSEC_CTL_TX_SA_PART1, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_SA_PART1"},
+ {I40E_PRTMAC_HSEC_CTL_TX_SA_PART2, 0, 0, 0, 0,
+ "PRTMAC_HSEC_CTL_TX_SA_PART2"},
+ {I40E_PRTTSYN_INC_L, 0, 0, 0, 0, "PRTTSYN_INC_L"},
+ {I40E_PRTTSYN_INC_H, 0, 0, 0, 0, "PRTTSYN_INC_H"},
+ {I40E_PRTTSYN_EVNT_L(0), 1, 32, 0, 0, "PRTTSYN_EVNT_L"},
+ {I40E_PRTTSYN_EVNT_H(0), 1, 32, 0, 0, "PRTTSYN_EVNT_H"},
+ {I40E_PRTTSYN_TIME_L, 0, 0, 0, 0, "PRTTSYN_TIME_L"},
+ {I40E_PRTTSYN_TIME_H, 0, 0, 0, 0, "PRTTSYN_TIME_H"},
+ {I40E_PRTTSYN_TGT_L(0), 1, 32, 0, 0, "PRTTSYN_TGT_L"},
+ {I40E_PRTTSYN_TGT_H(0), 1, 32, 0, 0, "PRTTSYN_TGT_H"},
+ {I40E_PRTTSYN_TXTIME_L, 0, 0, 0, 0, "PRTTSYN_TXTIME_L"},
+ {I40E_PRTTSYN_TXTIME_H, 0, 0, 0, 0, "PRTTSYN_TXTIME_H"},
+ {I40E_PRTTSYN_CTL0, 0, 0, 0, 0, "PRTTSYN_CTL0"},
+ {I40E_PRTTSYN_STAT_0, 0, 0, 0, 0, "PRTTSYN_STAT_0"},
+ {I40E_PRTTSYN_CLKO(0), 1, 32, 0, 0, "PRTTSYN_CLKO"},
+ {I40E_PRTTSYN_ADJ, 0, 0, 0, 0, "PRTTSYN_ADJ"},
+ {I40E_PRTTSYN_AUX_0(0), 1, 32, 0, 0, "PRTTSYN_AUX_0"},
+ {I40E_PRTTSYN_AUX_1(0), 1, 32, 0, 0, "PRTTSYN_AUX_1"},
+ {I40E_PRTPM_EEE_STAT, 0, 0, 0, 0, "PRTPM_EEE_STAT"},
+ {I40E_PRTPM_EEER, 0, 0, 0, 0, "PRTPM_EEER"},
+ {I40E_PRTPM_EEEC, 0, 0, 0, 0, "PRTPM_EEEC"},
+ {I40E_PRTPM_RLPIC, 0, 0, 0, 0, "PRTPM_RLPIC"},
+ {I40E_PRTPM_TLPIC, 0, 0, 0, 0, "PRTPM_TLPIC"},
+ {I40E_PRTPM_EEETXC, 0, 0, 0, 0, "PRTPM_EEETXC"},
+ {I40E_PRTPM_EEEFWD, 0, 0, 0, 0, "PRTPM_EEEFWD"},
+ {I40E_PRTPM_SAL(0), 3, 32, 0, 0, "PRTPM_SAL"},
+ {I40E_PRTPM_SAH(0), 3, 32, 0, 0, "PRTPM_SAH"},
+ {I40E_PRTDCB_TFCS, 0, 0, 0, 0, "PRTDCB_TFCS"},
+ {I40E_PRTDCB_FCTTVN(0), 3, 32, 0, 0, "PRTDCB_FCTTVN"},
+ {I40E_PRTDCB_FCRTV, 0, 0, 0, 0, "PRTDCB_FCRTV"},
+ {I40E_PRTDCB_FCCFG, 0, 0, 0, 0, "PRTDCB_FCCFG"},
+ {I40E_PRTDCB_TPFCTS(0), 7, 32, 0, 0, "PRTDCB_TPFCTS"},
+ {I40E_VFQF_HLUT1(0, 0), 15, 1024, 127, 4, "VFQF_HLUT1"},
+ {I40E_VSIQF_HLUT(0, 0), 15, 2048, 383, 4, "VSIQF_HLUT"},
+ {I40E_VFQF_HKEY1(0, 0), 12, 1024, 127, 4, "VFQF_HKEY1"},
+ {I40E_VFQF_HREGION1(0, 0), 7, 1024, 127, 4, "VFQF_HREGION1"},
+ {I40E_VFQF_HENA1(0, 0), 1, 1024, 127, 4, "VFQF_HENA1"},
+ {I40E_PFQF_HLUT(0), 127, 128, 0, 0, "PFQF_HLUT"},
+ {I40E_X722_PFQF_HLUT(0), 127, 128, 0, 0, "X722_PFQF_HLUT"},
+ {I40E_PFQF_CTL_1, 0, 0, 0, 0, "PFQF_CTL_1"},
+ {I40E_PFQF_FDSTAT, 0, 0, 0, 0, "PFQF_FDSTAT"},
+ {I40E_PRT_MNG_MIPAF6(0), 15, 32, 0, 0, "PRT_MNG_MIPAF6"},
+ {I40E_PRT_MNG_MFUTP(0), 15, 32, 0, 0, "PRT_MNG_MFUTP"},
+ {I40E_PRTQF_FLX_PIT(0), 8, 32, 0, 0, "PRTQF_FLX_PIT"},
+ {I40E_PRT_MNG_MAVTV(0), 7, 32, 0, 0, "PRT_MNG_MAVTV"},
+ {I40E_PRT_MNG_MDEF(0), 7, 32, 0, 0, "PRT_MNG_MDEF"},
+ {I40E_PRT_MNG_MDEF_EXT(0), 7, 32, 0, 0, "PRT_MNG_MDEF_EXT"},
+ {I40E_PRT_MNG_MIPAF4(0), 3, 32, 0, 0, "PRT_MNG_MIPAF4"},
+ {I40E_PRT_MNG_MMAH(0), 3, 32, 0, 0, "PRT_MNG_MMAH"},
+ {I40E_PRT_MNG_MMAL(0), 3, 32, 0, 0, "PRT_MNG_MMAL"},
+ {I40E_PRT_MNG_MDEFVSI(0), 3, 32, 0, 0, "PRT_MNG_MDEFVSI"},
+ {I40E_PRT_MNG_METF(0), 3, 32, 0, 0, "PRT_MNG_METF"},
+ {I40E_PRT_MNG_MANC, 0, 0, 0, 0, "PRT_MNG_MANC"},
+ {I40E_PRT_MNG_MNGONLY, 0, 0, 0, 0, "PRT_MNG_MNGONLY"},
+ {I40E_PRT_MNG_MSFM, 0, 0, 0, 0, "PRT_MNG_MSFM"},
+ {I40E_GLQF_APBVT(0), 2047, 4, 0, 0, "GLQF_APBVT"},
+ {I40E_GLQF_PCNT(0), 511, 4, 0, 0, "GLQF_PCNT"},
+ {I40E_GLQF_FD_PCTYPES(0), 63, 4, 0, 0, "GLQF_FD_PCTYPES"},
+ {I40E_GLQF_ORT(0), 63, 4, 0, 0, "GLQF_ORT"},
+ {I40E_GLQF_PIT(0), 23, 4, 0, 0, "GLQF_PIT"},
+ {I40E_GL_PRS_FVBM(0), 3, 4, 0, 0, "GL_PRS_FVBM"},
+ {I40E_GLQF_FDCNT_0, 0, 0, 0, 0, "GLQF_FDCNT_0"},
+ {I40E_GL_MTG_FLU_MSK_H, 0, 0, 0, 0, "GL_MTG_FLU_MSK_H"},
+ {I40E_GL_SWR_DEF_ACT_EN(0), 1, 4, 0, 0, "GL_SWR_DEF_ACT_EN"},
+ {I40E_GLQF_HKEY(0), 12, 4, 0, 0, "GLQF_HKEY"},
+ {I40E_GL_SWR_DEF_ACT(0), 35, 4, 0, 0, "GL_SWR_DEF_ACT"},
+ {I40E_GLQF_FDEVICTFLAG, 0, 0, 0, 0, "GLQF_FDEVICTFLAG"},
+ {I40E_PFQF_CTL_2, 0, 0, 0, 0, "PFQF_CTL_2"},
+ {I40E_GLQF_FDEVICTENA(0), 1, 4, 0, 0, "GLQF_FDEVICTENA"},
+ {I40E_VSIQF_HKEY(0, 0), 12, 2048, 383, 4, "VSIQF_HKEY"},
+ {I40E_GLPRT_GORCL(0), 3, 8, 0, 0, "GLPRT_GORCL"},
+ {I40E_GLPRT_GORCH(0), 3, 8, 0, 0, "GLPRT_GORCH"},
+ {I40E_GLPRT_MLFC(0), 3, 8, 0, 0, "GLPRT_MLFC"},
+ {I40E_GLPRT_MRFC(0), 3, 8, 0, 0, "GLPRT_MRFC"},
+ {I40E_GLPRT_CRCERRS(0), 3, 8, 0, 0, "GLPRT_CRCERRS"},
+ {I40E_GLPRT_RLEC(0), 3, 8, 0, 0, "GLPRT_RLEC"},
+ {I40E_GLPRT_ILLERRC(0), 3, 8, 0, 0, "GLPRT_ILLERRC"},
+ {I40E_GLPRT_RUC(0), 3, 8, 0, 0, "GLPRT_RUC"},
+ {I40E_GLPRT_ROC(0), 3, 8, 0, 0, "GLPRT_ROC"},
+ {I40E_GLPRT_LXONRXC(0), 3, 8, 0, 0, "GLPRT_LXONRXC"},
+ {I40E_GLPRT_LXOFFRXC(0), 3, 8, 0, 0, "GLPRT_LXOFFRXC"},
+ {I40E_GLPRT_PXONRXC(0, 0), 3, 8, 7, 32, "GLPRT_PXONRXC"},
+ {I40E_GLPRT_PXOFFRXC(0, 0), 3, 8, 7, 32, "GLPRT_PXOFFRXC"},
+ {I40E_GLPRT_RXON2OFFCNT(0, 0), 3, 8, 7, 32, "GLPRT_RXON2OFFCNT"},
+ {I40E_GLPRT_PRC64L(0), 3, 8, 0, 0, "GLPRT_PRC64L"},
+ {I40E_GLPRT_PRC64H(0), 3, 8, 0, 0, "GLPRT_PRC64H"},
+ {I40E_GLPRT_PRC127L(0), 3, 8, 0, 0, "GLPRT_PRC127L"},
+ {I40E_GLPRT_PRC127H(0), 3, 8, 0, 0, "GLPRT_PRC127H"},
+ {I40E_GLPRT_PRC255L(0), 3, 8, 0, 0, "GLPRT_PRC255L"},
+ {I40E_GLPRT_PRC255H(0), 3, 8, 0, 0, "GLPRT_PRC255H"},
+ {I40E_GLPRT_PRC511L(0), 3, 8, 0, 0, "GLPRT_PRC511L"},
+ {I40E_GLPRT_PRC511H(0), 3, 8, 0, 0, "GLPRT_PRC511H"},
+ {I40E_GLPRT_PRC1023L(0), 3, 8, 0, 0, "GLPRT_PRC1023L"},
+ {I40E_GLPRT_PRC1023H(0), 3, 8, 0, 0, "GLPRT_PRC1023H"},
+ {I40E_GLPRT_PRC1522L(0), 3, 8, 0, 0, "GLPRT_PRC1522L"},
+ {I40E_GLPRT_PRC1522H(0), 3, 8, 0, 0, "GLPRT_PRC1522H"},
+ {I40E_GLPRT_PRC9522L(0), 3, 8, 0, 0, "GLPRT_PRC9522L"},
+ {I40E_GLPRT_PRC9522H(0), 3, 8, 0, 0, "GLPRT_PRC9522H"},
+ {I40E_GLPRT_RFC(0), 3, 8, 0, 0, "GLPRT_RFC"},
+ {I40E_GLPRT_RJC(0), 3, 8, 0, 0, "GLPRT_RJC"},
+ {I40E_GLPRT_UPRCL(0), 3, 8, 0, 0, "GLPRT_UPRCL"},
+ {I40E_GLPRT_UPRCH(0), 3, 8, 0, 0, "GLPRT_UPRCH"},
+ {I40E_GLPRT_MPRCL(0), 3, 8, 0, 0, "GLPRT_MPRCL"},
+ {I40E_GLPRT_MPRCH(0), 3, 8, 0, 0, "GLPRT_MPRCH"},
+ {I40E_GLPRT_BPRCL(0), 3, 8, 0, 0, "GLPRT_BPRCL"},
+ {I40E_GLPRT_BPRCH(0), 3, 8, 0, 0, "GLPRT_BPRCH"},
+ {I40E_GLPRT_RDPC(0), 3, 8, 0, 0, "GLPRT_RDPC"},
+ {I40E_GLPRT_LDPC(0), 3, 8, 0, 0, "GLPRT_LDPC"},
+ {I40E_GLPRT_RUPP(0), 3, 8, 0, 0, "GLPRT_RUPP"},
+ {I40E_GLPRT_GOTCL(0), 3, 8, 0, 0, "GLPRT_GOTCL"},
+ {I40E_GLPRT_GOTCH(0), 3, 8, 0, 0, "GLPRT_GOTCH"},
+ {I40E_GLPRT_PTC64L(0), 3, 8, 0, 0, "GLPRT_PTC64L"},
+ {I40E_GLPRT_PTC64H(0), 3, 8, 0, 0, "GLPRT_PTC64H"},
+ {I40E_GLPRT_PTC127L(0), 3, 8, 0, 0, "GLPRT_PTC127L"},
+ {I40E_GLPRT_PTC127H(0), 3, 8, 0, 0, "GLPRT_PTC127H"},
+ {I40E_GLPRT_PTC255L(0), 3, 8, 0, 0, "GLPRT_PTC255L"},
+ {I40E_GLPRT_PTC255H(0), 3, 8, 0, 0, "GLPRT_PTC255H"},
+ {I40E_GLPRT_PTC511L(0), 3, 8, 0, 0, "GLPRT_PTC511L"},
+ {I40E_GLPRT_PTC511H(0), 3, 8, 0, 0, "GLPRT_PTC511H"},
+ {I40E_GLPRT_PTC1023L(0), 3, 8, 0, 0, "GLPRT_PTC1023L"},
+ {I40E_GLPRT_PTC1023H(0), 3, 8, 0, 0, "GLPRT_PTC1023H"},
+ {I40E_GLPRT_PTC1522L(0), 3, 8, 0, 0, "GLPRT_PTC1522L"},
+ {I40E_GLPRT_PTC1522H(0), 3, 8, 0, 0, "GLPRT_PTC1522H"},
+ {I40E_GLPRT_PTC9522L(0), 3, 8, 0, 0, "GLPRT_PTC9522L"},
+ {I40E_GLPRT_PTC9522H(0), 3, 8, 0, 0, "GLPRT_PTC9522H"},
+ {I40E_GLPRT_PXONTXC(0, 0), 3, 8, 7, 32, "GLPRT_PXONTXC"},
+ {I40E_GLPRT_PXOFFTXC(0, 0), 3, 8, 7, 32, "GLPRT_PXOFFTXC"},
+ {I40E_GLPRT_LXONTXC(0), 3, 8, 0, 0, "GLPRT_LXONTXC"},
+ {I40E_GLPRT_LXOFFTXC(0), 3, 8, 0, 0, "GLPRT_LXOFFTXC"},
+ {I40E_GLPRT_UPTCL(0), 3, 8, 0, 0, "GLPRT_UPTCL"},
+ {I40E_GLPRT_UPTCH(0), 3, 8, 0, 0, "GLPRT_UPTCH"},
+ {I40E_GLPRT_MPTCL(0), 3, 8, 0, 0, "GLPRT_MPTCL"},
+ {I40E_GLPRT_MPTCH(0), 3, 8, 0, 0, "GLPRT_MPTCH"},
+ {I40E_GLPRT_BPTCL(0), 3, 8, 0, 0, "GLPRT_BPTCL"},
+ {I40E_GLPRT_BPTCH(0), 3, 8, 0, 0, "GLPRT_BPTCH"},
+ {I40E_GLPRT_TDOLD(0), 3, 8, 0, 0, "GLPRT_TDOLD"},
+ {I40E_GLV_RDPC(0), 383, 8, 0, 0, "GLV_RDPC"},
+ {I40E_GL_FCOELAST(0), 143, 8, 0, 0, "GL_FCOELAST"},
+ {I40E_GL_FCOEDDPC(0), 143, 8, 0, 0, "GL_FCOEDDPC"},
+ {I40E_GL_FCOECRC(0), 143, 8, 0, 0, "GL_FCOECRC"},
+ {I40E_GL_FCOEPRC(0), 143, 8, 0, 0, "GL_FCOEPRC"},
+ {I40E_GL_RXERR1_L(0), 143, 8, 0, 0, "GL_RXERR1_L"},
+ {I40E_GL_FCOEDIFEC(0), 143, 8, 0, 0, "GL_FCOEDIFEC"},
+ {I40E_GL_RXERR2_L(0), 143, 8, 0, 0, "GL_RXERR2_L"},
+ {I40E_GL_FCOEDWRCL(0), 143, 8, 0, 0, "GL_FCOEDWRCL"},
+ {I40E_GL_FCOEDWRCH(0), 143, 8, 0, 0, "GL_FCOEDWRCH"},
+ {I40E_GL_FCOERPDC(0), 143, 8, 0, 0, "GL_FCOERPDC"},
+ {I40E_GLV_GOTCL(0), 383, 8, 0, 0, "GLV_GOTCL"},
+ {I40E_GLV_GOTCH(0), 383, 8, 0, 0, "GLV_GOTCH"},
+ {I40E_GLSW_GOTCL(0), 15, 8, 0, 0, "GLSW_GOTCL"},
+ {I40E_GLSW_GOTCH(0), 15, 8, 0, 0, "GLSW_GOTCH"},
+ {I40E_GLVEBVL_GOTCL(0), 127, 8, 0, 0, "GLVEBVL_GOTCL"},
+ {I40E_GLVEBVL_GOTCH(0), 127, 8, 0, 0, "GLVEBVL_GOTCH"},
+ {I40E_GLVEBTC_TBCL(0, 0), 7, 8, 15, 64, "GLVEBTC_TBCL"},
+ {I40E_GLVEBTC_TBCH(0, 0), 7, 8, 15, 64, "GLVEBTC_TBCH"},
+ {I40E_GLVEBTC_TPCL(0, 0), 7, 8, 15, 64, "GLVEBTC_TPCL"},
+ {I40E_GLVEBTC_TPCH(0, 0), 7, 8, 15, 64, "GLVEBTC_TPCH"},
+ {I40E_GLV_UPTCL(0), 383, 8, 0, 0, "GLV_UPTCL"},
+ {I40E_GLV_UPTCH(0), 383, 8, 0, 0, "GLV_UPTCH"},
+ {I40E_GLV_MPTCL(0), 383, 8, 0, 0, "GLV_MPTCL"},
+ {I40E_GLV_MPTCH(0), 383, 8, 0, 0, "GLV_MPTCH"},
+ {I40E_GLV_BPTCL(0), 383, 8, 0, 0, "GLV_BPTCL"},
+ {I40E_GLV_BPTCH(0), 383, 8, 0, 0, "GLV_BPTCH"},
+ {I40E_GLSW_UPTCL(0), 15, 8, 0, 0, "GLSW_UPTCL"},
+ {I40E_GLSW_UPTCH(0), 15, 8, 0, 0, "GLSW_UPTCH"},
+ {I40E_GLSW_MPTCL(0), 15, 8, 0, 0, "GLSW_MPTCL"},
+ {I40E_GLSW_MPTCH(0), 15, 8, 0, 0, "GLSW_MPTCH"},
+ {I40E_GLSW_BPTCL(0), 15, 8, 0, 0, "GLSW_BPTCL"},
+ {I40E_GLSW_BPTCH(0), 15, 8, 0, 0, "GLSW_BPTCH"},
+ {I40E_GLV_TEPC(0), 383, 4, 0, 0, "GLV_TEPC"},
+ {I40E_GL_FCOEPTC(0), 143, 8, 0, 0, "GL_FCOEPTC"},
+ {I40E_GLSW_TDPC(0), 15, 8, 0, 0, "GLSW_TDPC"},
+ {I40E_GL_FCOEDWTCL(0), 143, 8, 0, 0, "GL_FCOEDWTCL"},
+ {I40E_GL_FCOEDWTCH(0), 143, 8, 0, 0, "GL_FCOEDWTCH"},
+ {I40E_GL_FCOEDIXEC(0), 143, 8, 0, 0, "GL_FCOEDIXEC"},
+ {I40E_GL_FCOEDIXVC(0), 143, 8, 0, 0, "GL_FCOEDIXVC"},
+ {I40E_GL_FCOEDIFTCL(0), 143, 8, 0, 0, "GL_FCOEDIFTCL"},
+ {I40E_GLV_GORCL(0), 383, 8, 0, 0, "GLV_GORCL"},
+ {I40E_GLV_GORCH(0), 383, 8, 0, 0, "GLV_GORCH"},
+ {I40E_GLSW_GORCL(0), 15, 8, 0, 0, "GLSW_GORCL"},
+ {I40E_GLSW_GORCH(0), 15, 8, 0, 0, "GLSW_GORCH"},
+ {I40E_GLVEBVL_GORCL(0), 127, 8, 0, 0, "GLVEBVL_GORCL"},
+ {I40E_GLVEBVL_GORCH(0), 127, 8, 0, 0, "GLVEBVL_GORCH"},
+ {I40E_GLVEBTC_RBCL(0, 0), 7, 8, 15, 64, "GLVEBTC_RBCL"},
+ {I40E_GLVEBTC_RBCH(0, 0), 7, 8, 15, 64, "GLVEBTC_RBCH"},
+ {I40E_GLVEBTC_RPCL(0, 0), 7, 8, 15, 64, "GLVEBTC_RPCL"},
+ {I40E_GLVEBTC_RPCH(0, 0), 7, 8, 15, 64, "GLVEBTC_RPCH"},
+ {I40E_GLV_UPRCL(0), 383, 8, 0, 0, "GLV_UPRCL"},
+ {I40E_GLV_UPRCH(0), 383, 8, 0, 0, "GLV_UPRCH"},
+ {I40E_GLV_MPRCL(0), 383, 8, 0, 0, "GLV_MPRCL"},
+ {I40E_GLV_MPRCH(0), 383, 8, 0, 0, "GLV_MPRCH"},
+ {I40E_GLV_BPRCL(0), 383, 8, 0, 0, "GLV_BPRCL"},
+ {I40E_GLV_BPRCH(0), 383, 8, 0, 0, "GLV_BPRCH"},
+ {I40E_GLV_RUPP(0), 383, 8, 0, 0, "GLV_RUPP"},
+ {I40E_GLSW_UPRCL(0), 15, 8, 0, 0, "GLSW_UPRCL"},
+ {I40E_GLSW_UPRCH(0), 15, 8, 0, 0, "GLSW_UPRCH"},
+ {I40E_GLSW_MPRCL(0), 15, 8, 0, 0, "GLSW_MPRCL"},
+ {I40E_GLSW_MPRCH(0), 15, 8, 0, 0, "GLSW_MPRCH"},
+ {I40E_GLSW_BPRCL(0), 15, 8, 0, 0, "GLSW_BPRCL"},
+ {I40E_GLSW_BPRCH(0), 15, 8, 0, 0, "GLSW_BPRCH"},
+ {I40E_GLSW_RUPP(0), 15, 8, 0, 0, "GLSW_RUPP"},
+ {I40E_GLVEBVL_UPCL(0), 127, 8, 0, 0, "GLVEBVL_UPCL"},
+ {I40E_GLVEBVL_UPCH(0), 127, 8, 0, 0, "GLVEBVL_UPCH"},
+ {I40E_GLVEBVL_MPCL(0), 127, 8, 0, 0, "GLVEBVL_MPCL"},
+ {I40E_GLVEBVL_MPCH(0), 127, 8, 0, 0, "GLVEBVL_MPCH"},
+ {I40E_GLVEBVL_BPCL(0), 127, 8, 0, 0, "GLVEBVL_BPCL"},
+ {I40E_GLVEBVL_BPCH(0), 127, 8, 0, 0, "GLVEBVL_BPCH"},
+ {I40E_GLGEN_STAT_HALT, 0, 0, 0, 0, "GLGEN_STAT_HALT"},
+ {I40E_GLGEN_STAT_CLEAR, 0, 0, 0, 0, "GLGEN_STAT_CLEAR"},
+ {0, 0, 0, 0, 0, NULL}
+};
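+
+/*
+ * A minimal sketch of how a dump loop might consume the table above: each
+ * row is {base address, max index 1, stride 1, max index 2, stride 2, name},
+ * terminated by a NULL name. The struct tag and field names used here
+ * (i40e_reg_info, base_addr, count1, ...) and the I40E_READ_REG() accessor
+ * are assumptions about declarations that live elsewhere in this driver.
+ */
+static void __rte_unused
+i40e_regs_dump_sketch(struct i40e_hw *hw, const struct i40e_reg_info *tbl)
+{
+	uint32_t i, j, reg;
+
+	for (; tbl->name != NULL; tbl++) {
+		for (i = 0; i <= tbl->count1; i++) {
+			for (j = 0; j <= tbl->count2; j++) {
+				/* Instance address = base + per-index strides */
+				reg = tbl->base_addr +
+				      i * tbl->stride1 + j * tbl->stride2;
+				printf("%s[%u][%u]: 0x%08x\n",
+				       tbl->name, i, j,
+				       I40E_READ_REG(hw, reg));
+			}
+		}
+	}
+}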
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx.c b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
new file mode 100644
index 00000000..d3cfb98f
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
@@ -0,0 +1,3329 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_udp.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 32
+#define I40E_MAX_PKT_TYPE 256
+
+#define I40E_TX_MAX_BURST 32
+
+#define I40E_DMA_MEM_ALIGN 4096
+
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define I40E_RING_BASE_ALIGN 128
+
+#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+ ETH_TXQ_FLAGS_NOOFFLOADS)
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
+#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_OUTER_IP_CKSUM)
+
+static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static inline void
+i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
+{
+ if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci =
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
+ PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
+ } else {
+ mb->vlan_tci = 0;
+ }
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
+ (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
+ mb->vlan_tci_outer = mb->vlan_tci;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
+ PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+ rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
+ rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
+ } else {
+ mb->vlan_tci_outer = 0;
+ }
+#endif
+ PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
+ mb->vlan_tci, mb->vlan_tci_outer);
+}
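+
+/*
+ * Note on the double-VLAN path above: when the L2TAG2P bit is set, the tag
+ * already read from l2tag1 is promoted to vlan_tci_outer and l2tag2_2 is
+ * reported as the inner vlan_tci, which is what PKT_RX_QINQ_STRIPPED
+ * advertises to applications.
+ */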
+
+/* Translate the rx descriptor status to pkt flags */
+static inline uint64_t
+i40e_rxd_status_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags;
+
+ /* Check if RSS_HASH */
+ flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+
+ /* Check if FDIR Match */
+ flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
+ PKT_RX_FDIR : 0);
+
+ return flags;
+}
+
+static inline uint64_t
+i40e_rxd_error_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags = 0;
+ uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
+
+#define I40E_RX_ERR_BITS 0x3f
+ if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
+ return flags;
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
+ flags |= PKT_RX_IP_CKSUM_BAD;
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
+ flags |= PKT_RX_EIP_CKSUM_BAD;
+
+ return flags;
+}
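+
+/*
+ * A minimal usage sketch: the receive paths later in this file apply both
+ * translators to the same CPU-endian copy of the descriptor's
+ * status_error_len word (the local name "qword1" is assumed here):
+ *
+ *	pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ *	pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ *	mb->ol_flags |= pkt_flags;
+ */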
+
+/* Function to check and set the ieee1588 timesync index and get the
+ * appropriate flags.
+ */
+#ifdef RTE_LIBRTE_IEEE1588
+static inline uint64_t
+i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
+{
+ uint64_t pkt_flags = 0;
+ uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK
+ | I40E_RXD_QW1_STATUS_TSYNINDX_MASK))
+ >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT;
+
+ if ((mb->packet_type & RTE_PTYPE_L2_MASK)
+ == RTE_PTYPE_L2_ETHER_TIMESYNC)
+ pkt_flags = PKT_RX_IEEE1588_PTP;
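+	/* After the shift, bits [1:0] of tsyn hold the timestamp register
+	 * index and bit 2 holds TSYNVALID, hence the 0x04 and 0x03 masks
+	 * below.
+	 */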
+ if (tsyn & 0x04) {
+ pkt_flags |= PKT_RX_IEEE1588_TMST;
+ mb->timesync = tsyn & 0x03;
+ }
+
+ return pkt_flags;
+}
+#endif
+
+/* For the meaning of each value, refer to the hardware datasheet.
+ *
+ * @note: update i40e_dev_supported_ptypes_get() if anything changes here.
+ */
+static inline uint32_t
+i40e_rxd_pkt_type_mapping(uint8_t ptype)
+{
+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* L2 types */
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
+ /* [3] - [5] reserved */
+ [6] = RTE_PTYPE_L2_ETHER_LLDP,
+ /* [7] - [10] reserved */
+ [11] = RTE_PTYPE_L2_ETHER_ARP,
+ /* [12] - [21] reserved */
+
+ /* Non tunneled IPv4 */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv4 --> IPv4 */
+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [32] reserved */
+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> IPv6 */
+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [39] reserved */
+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [47] reserved */
+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [54] reserved */
+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [62] reserved */
+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [69] reserved */
+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [77] reserved */
+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [84] reserved */
+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* Non tunneled IPv6 */
+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [91] reserved */
+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv6 --> IPv4 */
+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [98] reserved */
+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> IPv6 */
+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [105] reserved */
+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [113] reserved */
+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [120] reserved */
+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [128] reserved */
+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [135] reserved */
+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [143] reserved */
+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [150] reserved */
+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* L2 NSH packet type */
+ [154] = RTE_PTYPE_L2_ETHER_NSH,
+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* All others reserved */
+ };
+
+ return type_table[ptype];
+}
+
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
+#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK 0x03
+#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX 0x01
+
+static inline uint64_t
+i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+ uint64_t flags = 0;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ uint16_t flexbh, flexbl;
+
+ flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+ flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;
+
+ if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+ } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
+ flags |= PKT_RX_FDIR_FLX;
+ }
+ if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
+ mb->hash.fdir.lo =
+ rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
+ flags |= PKT_RX_FDIR_FLX;
+ }
+#else
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+#endif
+ return flags;
+}
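+
+/*
+ * Illustrative sketch (not compiled into the driver): how an application
+ * could consume the flow director fields filled in above; "m" stands for a
+ * hypothetical mbuf returned by rte_eth_rx_burst().
+ *
+ *	if (m->ol_flags & PKT_RX_FDIR) {
+ *		if (m->ol_flags & PKT_RX_FDIR_ID)
+ *			printf("matched FDIR rule id %u\n", m->hash.fdir.hi);
+ *		else if (m->ol_flags & PKT_RX_FDIR_FLX)
+ *			printf("flex bytes %#x/%#x\n",
+ *			       m->hash.fdir.hi, m->hash.fdir.lo);
+ *	}
+ */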
+static inline void
+i40e_txd_enable_checksum(uint64_t ol_flags,
+ uint32_t *td_cmd,
+ uint32_t *td_offset,
+ union i40e_tx_offload tx_offload,
+ uint32_t *cd_tunneling)
+{
+ /* UDP tunneling packet TX checksum offload */
+	if (ol_flags & (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_IPV4 |
+		    PKT_TX_OUTER_IPV6)) {
+
+ *td_offset |= (tx_offload.outer_l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ else if (ol_flags & PKT_TX_OUTER_IPV4)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ else if (ol_flags & PKT_TX_OUTER_IPV6)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ (tx_offload.l2_len >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else
+ *td_offset |= (tx_offload.l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L3 checksum offloads */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV4) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (tx_offload.l4_len >> 2)
+ << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ return;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
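+
+/*
+ * Illustrative sketch (not compiled into the driver): the mbuf fields an
+ * application is expected to set so that the offsets computed above are
+ * meaningful; "m", "ipv4_hdr" and "tcp_hdr" are hypothetical, for a plain
+ * IPv4/TCP packet.
+ *
+ *	m->l2_len = sizeof(struct ether_hdr);
+ *	m->l3_len = sizeof(struct ipv4_hdr);
+ *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
+ *	ipv4_hdr->hdr_checksum = 0;
+ *	tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr, m->ol_flags);
+ */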
+
+/* Construct the tx flags */
+static inline uint64_t
+i40e_build_ctob(uint32_t td_cmd,
+ uint32_t td_offset,
+ unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+static inline int
+i40e_xmit_cleanup(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *sw_ring = txq->sw_ring;
+ volatile struct i40e_tx_desc *txd = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
+ PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ return -1;
+ }
+
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ return 0;
+}
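+
+/*
+ * Worked example of the cleanup arithmetic above, assuming nb_tx_desc = 512,
+ * tx_rs_thresh = 32 and last_desc_cleaned = 500: desc_to_clean_to wraps to
+ * (500 + 32) - 512 = 20, and since last_desc_cleaned > desc_to_clean_to,
+ * nb_tx_to_clean = (512 - 500) + 20 = 32 descriptors are reclaimed.
+ */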
+
+static inline int
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+check_rx_burst_bulk_alloc_preconditions(struct i40e_rx_queue *rxq)
+#else
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
+#endif
+{
+ int ret = 0;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_I40E_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST);
+ ret = -EINVAL;
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
+ ret = -EINVAL;
+ } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = -EINVAL;
+ } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
+ RTE_PMD_I40E_RX_MAX_BURST))) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "I40E_MAX_RING_DESC=%d, "
+ "RTE_PMD_I40E_RX_MAX_BURST=%d",
+ rxq->nb_rx_desc, I40E_MAX_RING_DESC,
+ RTE_PMD_I40E_RX_MAX_BURST);
+ ret = -EINVAL;
+ }
+#else
+ ret = -EINVAL;
+#endif
+
+ return ret;
+}
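+
+/*
+ * Example configuration satisfying every precondition above, assuming
+ * RTE_PMD_I40E_RX_MAX_BURST is 32 and I40E_MAX_RING_DESC is 4096:
+ * nb_rx_desc = 1024 with rx_free_thresh = 64 gives 64 >= 32, 64 < 1024,
+ * 1024 % 64 == 0 and 1024 < 4096 - 32.
+ */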
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+#define I40E_LOOK_AHEAD 8
+#if (I40E_LOOK_AHEAD != 8)
+#error "PMD I40E: I40E_LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t qword1;
+ uint32_t rx_status;
+ int32_t s[I40E_LOOK_AHEAD], nb_dd;
+ int32_t i, j, nb_rx = 0;
+ uint64_t pkt_flags;
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ /* Make sure there is at least 1 packet to receive */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /**
+ * Scan LOOK_AHEAD descriptors at a time to determine which
+ * descriptors reference packets that are ready to be received.
+ */
+	for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; i += I40E_LOOK_AHEAD,
+ rxdp += I40E_LOOK_AHEAD, rxep += I40E_LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = I40E_LOOK_AHEAD - 1; j >= 0; j--) {
+ qword1 = rte_le_to_cpu_64(\
+ rxdp[j].wb.qword1.status_error_len);
+ s[j] = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ /* Compute how many status bits were set */
+ for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
+ nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf parameters */
+ for (j = 0; j < nb_dd; j++) {
+ mb = rxep[j].mbuf;
+ qword1 = rte_le_to_cpu_64(\
+ rxdp[j].wb.qword1.status_error_len);
+ pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(mb, &rxdp[j]);
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ mb->packet_type =
+ i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT));
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ mb->hash.rss = rte_le_to_cpu_32(\
+ rxdp[j].wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
+
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(mb, qword1);
+#endif
+ mb->ol_flags |= pkt_flags;
+ }
+
+ for (j = 0; j < I40E_LOOK_AHEAD; j++)
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+
+ if (nb_dd != I40E_LOOK_AHEAD)
+ break;
+ }
+
+ /* Clear software ring entries */
+ for (i = 0; i < nb_rx; i++)
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+
+ return nb_rx;
+}
+
+static inline uint16_t
+i40e_rx_fill_from_stage(struct i40e_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t i;
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ for (i = 0; i < nb_pkts; i++)
+ rx_pkts[i] = stage[i];
+
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline int
+i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx, i;
+ uint64_t dma_addr;
+ int diag;
+
+ /* Allocate buffers in bulk */
+ alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+ (rxq->rx_free_thresh - 1));
+ rxep = &(rxq->sw_ring[alloc_idx]);
+ diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0)) {
+ PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
+ return -ENOMEM;
+ }
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; i++) {
+ if (likely(i < (rxq->rx_free_thresh - 1)))
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxep[i + 1].mbuf);
+
+ mb = rxep[i].mbuf;
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(\
+ rte_mbuf_data_dma_addr_default(mb));
+ rxdp[i].read.hdr_addr = 0;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+	/* Update the RX tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+
+ rxq->rx_free_trigger =
+ (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ return 0;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ uint16_t nb_rx = 0;
+
+ if (!nb_pkts)
+ return 0;
+
+ if (rxq->rx_nb_avail)
+ return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ nb_rx = (uint16_t)i40e_rx_scan_hw_ring(rxq);
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ if (i40e_rx_alloc_bufs(rxq) != 0) {
+ uint16_t i, j;
+
+ PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
+ "port_id=%u, queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ if (rxq->rx_nb_avail)
+ return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+static uint16_t
+i40e_recv_pkts_bulk_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx = 0, n, count;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST);
+ count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + count);
+ nb_pkts = (uint16_t)(nb_pkts - count);
+ if (count < n)
+ break;
+ }
+
+ return nb_rx;
+}
+#else
+static uint16_t
+i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
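+
+/*
+ * Minimal receive loop sketch (not compiled into the driver); "port_id" and
+ * "queue_id" are hypothetical. rte_eth_rx_burst() dispatches to whichever
+ * rx_pkt_burst handler was selected for the port, e.g.
+ * i40e_recv_pkts_bulk_alloc() above.
+ *
+ *	struct rte_mbuf *pkts[32];
+ *	uint16_t i, n;
+ *
+ *	n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
+ *	for (i = 0; i < n; i++)
+ *		rte_pktmbuf_free(pkts[i]);
+ */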
+
+uint16_t
+i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq;
+ volatile union i40e_rx_desc *rx_ring;
+ volatile union i40e_rx_desc *rxdp;
+ union i40e_rx_desc rxd;
+ struct i40e_rx_entry *sw_ring;
+ struct i40e_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ uint16_t nb_rx;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint16_t rx_packet_len;
+ uint16_t rx_id, nb_hold;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit first */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb))
+ break;
+ rxd = *rxdp;
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (unlikely(rx_id == rxq->nb_rx_desc))
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(sw_ring[rx_id].mbuf);
+
+ /**
+ * When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(&sw_ring[rx_id]);
+ }
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+
+ rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->port = rxq->port_id;
+ rxm->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(rxm, &rxd);
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ rxm->packet_type =
+ i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ rxm->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(rxm, qword1);
+#endif
+ rxm->ol_flags |= pkt_flags;
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /**
+	 * If the number of free RX descriptors is greater than the RX free
+	 * threshold of the queue, advance the receive tail register of the
+	 * queue. Update that register with the value of the last processed
+	 * RX descriptor minus 1.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+uint16_t
+i40e_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile union i40e_rx_desc *rx_ring = rxq->rx_ring;
+ volatile union i40e_rx_desc *rxdp;
+ union i40e_rx_desc rxd;
+ struct i40e_rx_entry *sw_ring = rxq->sw_ring;
+ struct i40e_rx_entry *rxe;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rte_mbuf *nmb, *rxm;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb))
+ break;
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(sw_ring[rx_id].mbuf);
+
+ /**
+ * When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+
+ /* Set data buffer address and data length of the mbuf */
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+ rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rxm->data_len = rx_packet_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /**
+ * If this is the first buffer of the received packet, set the
+ * pointer to the first mbuf of the packet and initialize its
+ * context. Otherwise, update the total length and the number
+ * of segments of the current scattered packet, and update the
+ * pointer to the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rx_packet_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /**
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))) {
+ last_seg = rxm;
+ continue;
+ }
+
+ /**
+ * This is the last buffer of the received packet. If the CRC
+ * is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer. If part
+ * of the CRC is also contained in the previous mbuf, subtract
+ * the length of that CRC part from the data length of the
+ * previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (rx_packet_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
+ (ETHER_CRC_LEN - rx_packet_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len = (uint16_t)(rx_packet_len -
+ ETHER_CRC_LEN);
+ }
+
+ first_seg->port = rxq->port_id;
+ first_seg->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(first_seg, &rxd);
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ first_seg->packet_type =
+ i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+		if (pkt_flags & PKT_RX_RSS_HASH)
+			first_seg->hash.rss =
+				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+		if (pkt_flags & PKT_RX_FDIR)
+			pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
+
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
+#endif
+ first_seg->ol_flags |= pkt_flags;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+
+ /* Record index of the next RX descriptor to probe. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /**
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register. Update the RDT with the value of the last processed RX
+ * descriptor minus 1, to guarantee that the RDT register is never
+	 * equal to the RDH register, which creates a "full" ring situation
+ * from the hardware point of view.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ rx_id = (uint16_t)(rx_id == 0 ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+i40e_calc_context_desc(uint64_t flags)
+{
+ static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TCP_SEG |
+ PKT_TX_QINQ_PKT;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ mask |= PKT_TX_IEEE1588_TMST;
+#endif
+
+ return (flags & mask) ? 1 : 0;
+}
+
+/* set i40e TSO context descriptor */
+static inline uint64_t
+i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
+{
+ uint64_t ctx_desc = 0;
+ uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+ if (!tx_offload.l4_len) {
+ PMD_DRV_LOG(DEBUG, "L4 length set to 0");
+ return ctx_desc;
+ }
+
+ /**
+	 * In case of a non-tunneling packet, outer_l2_len and
+	 * outer_l3_len must be 0, so that hdr_len below sums to the
+	 * full header length in either case.
+ */
+ hdr_len = tx_offload.outer_l2_len +
+ tx_offload.outer_l3_len +
+ tx_offload.l2_len +
+ tx_offload.l3_len +
+ tx_offload.l4_len;
+
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = mbuf->pkt_len - hdr_len;
+ ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((uint64_t)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((uint64_t)mbuf->tso_segsz <<
+ I40E_TXD_CTX_QW1_MSS_SHIFT);
+
+ return ctx_desc;
+}
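+
+/*
+ * Illustrative sketch (not compiled into the driver): the mbuf setup an
+ * application would do to request TSO, which makes i40e_calc_context_desc()
+ * request a context descriptor and i40e_set_tso_ctx() fill it; "m" is a
+ * hypothetical untagged IPv4/TCP mbuf.
+ *
+ *	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
+ *	m->l2_len = sizeof(struct ether_hdr);
+ *	m->l3_len = sizeof(struct ipv4_hdr);
+ *	m->l4_len = sizeof(struct tcp_hdr);
+ *	m->tso_segsz = 1460;
+ */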
+
+uint16_t
+i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq;
+ struct i40e_tx_entry *sw_ring;
+ struct i40e_tx_entry *txe, *txn;
+ volatile struct i40e_tx_desc *txd;
+ volatile struct i40e_tx_desc *txr;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint32_t cd_tunneling_params;
+ uint16_t tx_id;
+ uint16_t nb_tx;
+ uint32_t td_cmd;
+ uint32_t td_offset;
+ uint32_t tx_flags;
+ uint32_t td_tag;
+ uint64_t ol_flags;
+ uint16_t nb_used;
+ uint16_t nb_ctx;
+ uint16_t tx_last;
+ uint16_t slen;
+ uint64_t buf_dma_addr;
+ union i40e_tx_offload tx_offload = {0};
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Check if the descriptor ring needs to be cleaned. */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_xmit_cleanup(txq);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
+ tx_flags = 0;
+
+ tx_pkt = *tx_pkts++;
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ ol_flags = tx_pkt->ol_flags;
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+ /* Calculate the number of context descriptors needed. */
+ nb_ctx = i40e_calc_context_desc(ol_flags);
+
+ /**
+		 * The number of descriptors that must be allocated for
+		 * a packet equals the number of segments of that packet,
+		 * plus one context descriptor if needed.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ if (nb_used > txq->nb_tx_free) {
+ if (i40e_xmit_cleanup(txq) != 0) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ if (unlikely(nb_used > txq->tx_rs_thresh)) {
+ while (nb_used > txq->nb_tx_free) {
+ if (i40e_xmit_cleanup(txq) != 0) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /* Descriptor based VLAN insertion */
+ if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ tx_flags |= tx_pkt->vlan_tci <<
+ I40E_TX_FLAG_L2TAG1_SHIFT;
+ tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
+ I40E_TX_FLAG_L2TAG1_SHIFT;
+ }
+
+ /* Always enable CRC offload insertion */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ /* Enable checksum offloading */
+ cd_tunneling_params = 0;
+ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
+ i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
+ tx_offload, &cd_tunneling_params);
+ }
+
+ if (nb_ctx) {
+ /* Setup TX context descriptor if required */
+ volatile struct i40e_tx_context_desc *ctx_txd =
+ (volatile struct i40e_tx_context_desc *)\
+ &txr[tx_id];
+ uint16_t cd_l2tag2 = 0;
+ uint64_t cd_type_cmd_tso_mss =
+ I40E_TX_DESC_DTYPE_CONTEXT;
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ /* TSO enabled means no timestamp */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cd_type_cmd_tso_mss |=
+ i40e_set_tso_ctx(tx_pkt, tx_offload);
+ else {
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT);
+#endif
+ }
+
+ ctx_txd->tunneling_params =
+ rte_cpu_to_le_32(cd_tunneling_params);
+ if (ol_flags & PKT_TX_QINQ_PKT) {
+ cd_l2tag2 = tx_pkt->vlan_tci_outer;
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT);
+ }
+ ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
+ ctx_txd->type_cmd_tso_mss =
+ rte_cpu_to_le_64(cd_type_cmd_tso_mss);
+
+ PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n"
+ "tunneling_params: %#x;\n"
+ "l2tag2: %#hx;\n"
+ "rsvd: %#hx;\n"
+ "type_cmd_tso_mss: %#"PRIx64";\n",
+ tx_pkt, tx_id,
+ ctx_txd->tunneling_params,
+ ctx_txd->l2tag2,
+ ctx_txd->rsvd,
+ ctx_txd->type_cmd_tso_mss);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+
+ if (txe->mbuf)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /* Setup TX Descriptor */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+
+ PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
+ "buf_dma_addr: %#"PRIx64";\n"
+ "td_cmd: %#x;\n"
+ "td_offset: %#x;\n"
+ "td_len: %u;\n"
+ "td_tag: %#x;\n",
+ tx_pkt, tx_id, buf_dma_addr,
+ td_cmd, td_offset, slen, td_tag);
+
+ txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
+ td_offset, slen, td_tag);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /* The last packet data descriptor needs End Of Packet (EOP) */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ td_cmd |= I40E_TX_DESC_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_tx_used = 0;
+ }
+
+ txd->cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)td_cmd) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
+
+end_of_tx:
+ rte_wmb();
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
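+
+/*
+ * Minimal transmit loop sketch (not compiled into the driver); "port_id",
+ * "queue_id", "pkts" and "n" are hypothetical. rte_eth_tx_burst() dispatches
+ * to whichever tx_pkt_burst handler was selected for the port, e.g.
+ * i40e_xmit_pkts() above; packets not accepted in one call are retried.
+ *
+ *	uint16_t sent = 0;
+ *
+ *	while (sent < n)
+ *		sent += rte_eth_tx_burst(port_id, queue_id,
+ *					 &pkts[sent], n - sent);
+ */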
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint16_t i;
+
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+
+ for (i = 0; i < txq->tx_rs_thresh; i++)
+ rte_prefetch0((txep + i)->mbuf);
+
+ if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_mempool_put(txep->mbuf->pool, txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ } else {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_pktmbuf_free_seg(txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ }
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t dma_addr;
+ uint32_t i;
+
+ for (i = 0; i < 4; i++, txdp++, pkts++) {
+ dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
+ (*pkts)->data_len, 0);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t dma_addr;
+
+ dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
+ (*pkts)->data_len, 0);
+}
+
+/* Fill hardware descriptor ring with mbuf data */
+static inline void
+i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
+ int mainpart, leftover;
+ int i, j;
+
+ mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ for (j = 0; j < N_PER_LOOP; ++j) {
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ }
+ tx4(txdp + i, pkts + i);
+ }
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
+
+static inline uint16_t
+tx_xmit_pkts(struct i40e_tx_queue *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct i40e_tx_desc *txr = txq->tx_ring;
+ uint16_t n = 0;
+
+ /**
+ * Begin scanning the H/W ring for done descriptors when the number
+ * of available descriptors drops below tx_free_thresh. For each done
+ * descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ /* Use available descriptor only */
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(!nb_pkts))
+ return 0;
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+ i40e_tx_fill_hw_ring(txq, tx_pkts, n);
+ txr[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_tail = 0;
+ }
+
+ /* Fill hardware descriptor ring with mbuf data */
+ i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+	/* Determine if the RS bit needs to be set */
+ if (txq->tx_tail > txq->tx_next_rs) {
+ txr[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ if (txq->tx_next_rs >= txq->nb_tx_desc)
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+ }
+
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ /* Update the tx tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+static uint16_t
+i40e_xmit_pkts_simple(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+
+ if (likely(nb_pkts <= I40E_TX_MAX_BURST))
+ return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
+ tx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
+ I40E_TX_MAX_BURST);
+
+ ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
+ &tx_pkts[nb_tx], num);
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/*
+ * Find the VSI the queue belongs to. 'queue_idx' is the queue index the
+ * application uses, assuming the queues are sequential. From the driver's
+ * perspective they are not: for example, q0 belongs to the FDIR VSI, q1-q64
+ * to the MAIN VSI, q65-q96 to SRIOV VSIs, and q97-q128 to VMDQ VSIs. An
+ * application running on the host can use q1-q64 and q97-q128, 96 queues in
+ * total, addressed as queue_idx 0 to 95, while the real queue indexes
+ * differ. This function maps a queue_idx to the VSI the queue belongs to.
+ */
+static struct i40e_vsi*
+i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+ /* the queue in MAIN VSI range */
+ if (queue_idx < pf->main_vsi->nb_qps)
+ return pf->main_vsi;
+
+ queue_idx -= pf->main_vsi->nb_qps;
+
+	/* queue_idx is out of the VMDQ VSIs' range */
+ if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
+ PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
+ return NULL;
+ }
+
+ return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
+}
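+
+/*
+ * Worked example of the mapping above, assuming main_vsi->nb_qps = 64,
+ * nb_cfg_vmdq_vsi = 4 and vmdq_nb_qps = 8: queue_idx 70 is past the MAIN
+ * VSI range (70 - 64 = 6), 6 <= 4 * 8 - 1 holds, so it maps to
+ * pf->vmdq[6 / 8].vsi = pf->vmdq[0].vsi, the first VMDQ VSI, at queue
+ * offset 6 % 8 = 6 (see i40e_get_queue_offset_by_qindex() below).
+ */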
+
+static uint16_t
+i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+ /* the queue in MAIN VSI range */
+ if (queue_idx < pf->main_vsi->nb_qps)
+ return queue_idx;
+
+ /* It's VMDQ queues */
+ queue_idx -= pf->main_vsi->nb_qps;
+
+ if (pf->nb_cfg_vmdq_vsi)
+ return queue_idx % pf->vmdq_nb_qps;
+ else {
+ PMD_INIT_LOG(ERR, "Fail to get queue offset");
+ return (uint16_t)(-1);
+ }
+}
+
+int
+i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err = -1;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+		/* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ } else
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return err;
+}
+
+int
+i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /*
+		 * rx_queue_id is the queue id the application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+int
+i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ int err = -1;
+ struct i40e_tx_queue *txq;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /*
+		 * tx_queue_id is the queue id the application refers to, while
+		 * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return err;
+}
+
+int
+i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_tx_queue *txq;
+ int err;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /*
+		 * tx_queue_id is the queue id the application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+const uint32_t *
+i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to i40e_rxd_pkt_type_mapping() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == i40e_recv_pkts ||
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+#endif
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts)
+ return ptypes;
+ return NULL;
+}
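+
+/*
+ * Usage sketch (not compiled into the driver): querying the list above
+ * through the generic API; "port_id" is hypothetical. A NULL return here
+ * yields an empty list for the caller.
+ *
+ *	uint32_t ptypes[32];
+ *	int i, num;
+ *
+ *	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
+ *					       ptypes, 32);
+ *	for (i = 0; i < num; i++)
+ *		printf("ptype %#x\n", ptypes[i]);
+ */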
+
+int
+i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_rx_queue *rxq;
+ const struct rte_memzone *rz;
+ uint32_t ring_size;
+ uint16_t len, i;
+ uint16_t base, bsf, tc_mapping;
+ int use_def_burst_func = 1;
+
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
+ struct i40e_vf *vf =
+ I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vsi = &vf->vsi;
+ } else
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+
+ if (vsi == NULL) {
+ PMD_DRV_LOG(ERR, "VSI not available or queue "
+ "index exceeds the maximum");
+ return I40E_ERR_PARAM;
+ }
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
+ PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
+ "invalid", nb_desc);
+ return I40E_ERR_PARAM;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("i40e rx queue",
+ sizeof(struct i40e_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure");
+ return -ENOMEM;
+ }
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
+ rxq->reg_idx = queue_idx;
+ else /* PF device */
+ rxq->reg_idx = vsi->base_queue +
+ i40e_get_queue_offset_by_qindex(pf, queue_idx);
+
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+ 0 : ETHER_CRC_LEN);
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->vsi = vsi;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+	/* Allocate ring memory for the maximum number of RX descriptors. */
+ ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
+ if (!rz) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
+ return -ENOMEM;
+ }
+
+ /* Zero all the descriptors in the ring. */
+ memset(rz->addr, 0, ring_size);
+
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
+#else
+ len = nb_desc;
+#endif
+
+ /* Allocate the software ring. */
+ rxq->sw_ring =
+ rte_zmalloc_socket("i40e rx sw ring",
+ sizeof(struct i40e_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+ i40e_reset_rx_queue(rxq);
+ rxq->q_set = TRUE;
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+
+ if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+ if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+ rxq->dcb_tc = i;
+ }
+
+ return 0;
+}
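+
+/*
+ * Setup sketch (not compiled into the driver): how an application reaches
+ * this function through the generic API; "port_id" and "mbuf_pool" are
+ * hypothetical. Passing NULL for rx_conf selects the driver defaults.
+ *
+ *	int ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
+ *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
+ *	if (ret < 0)
+ *		rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
+ */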
+
+void
+i40e_dev_rx_queue_release(void *rxq)
+{
+ struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
+
+ if (!q) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+ return;
+ }
+
+ i40e_rx_queue_release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_free(q);
+}
+
+uint32_t
+i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define I40E_RXQ_SCAN_INTERVAL 4
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_queue *rxq;
+ uint16_t desc = 0;
+
+ if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
+ PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
+ return 0;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+ while ((desc < rxq->nb_rx_desc) &&
+ ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
+ (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ /**
+		 * Check the DD bit of only one RX descriptor in each group
+		 * of 4, to avoid checking too frequently and degrading
+		 * performance too much.
+ */
+ desc += I40E_RXQ_SCAN_INTERVAL;
+ rxdp += I40E_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_rx_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset);
+ return 0;
+ }
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &(rxq->rx_ring[desc]);
+
+ ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
+ (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
+
+ return ret;
+}
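+
+/*
+ * Usage sketch (not compiled into the driver): polling descriptor
+ * completion through the generic API; "port_id" and "queue_id" are
+ * hypothetical.
+ *
+ *	if (rte_eth_rx_descriptor_done(port_id, queue_id, 0) == 1)
+ *		printf("next descriptor holds a completed packet\n");
+ */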
+
+int
+i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tx_queue *txq;
+ const struct rte_memzone *tz;
+ uint32_t ring_size;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ uint16_t i, base, bsf, tc_mapping;
+
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
+ struct i40e_vf *vf =
+ I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vsi = &vf->vsi;
+ } else
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+
+ if (vsi == NULL) {
+ PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
+ "exceeds the maximum", queue_idx);
+ return I40E_ERR_PARAM;
+ }
+
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
+ PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
+ "invalid", nb_desc);
+ return I40E_ERR_PARAM;
+ }
+
+ /**
+ * The following two parameters control the setting of the RS bit on
+ * transmit descriptors. TX descriptors will have their RS bit set
+ * after txq->tx_rs_thresh descriptors have been used. The TX
+ * descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required to
+ * transmit a packet is greater than the number of free TX descriptors.
+ *
+ * The following constraints must be satisfied:
+ * - tx_rs_thresh must be greater than 0.
+ * - tx_rs_thresh must be less than the size of the ring minus 2.
+ * - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * - tx_rs_thresh must be a divisor of the ring size.
+ * - tx_free_thresh must be greater than 0.
+ * - tx_free_thresh must be less than the size of the ring minus 3.
+ *
+ * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+ * race condition, hence the maximum threshold constraints. When set
+ * to zero use default values.
+ */
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+ "number of TX descriptors minus 2. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+ "tx_free_thresh must be less than the "
+ "number of TX descriptors minus 3. "
+ "(tx_free_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
+ "equal to tx_free_thresh. (tx_free_thresh=%u"
+ " tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_rs_thresh=%u"
+ " port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+ PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("i40e tx queue",
+ sizeof(struct i40e_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure");
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
+ if (!tz) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_rs_thresh = tx_rs_thresh;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
+ txq->reg_idx = queue_idx;
+ else /* PF device */
+ txq->reg_idx = vsi->base_queue +
+ i40e_get_queue_offset_by_qindex(pf, queue_idx);
+
+ txq->port_id = dev->data->port_id;
+ txq->txq_flags = tx_conf->txq_flags;
+ txq->vsi = vsi;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("i40e tx sw ring",
+ sizeof(struct i40e_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ return -ENOMEM;
+ }
+
+ i40e_reset_tx_queue(txq);
+ txq->q_set = TRUE;
+ dev->data->tx_queues[queue_idx] = txq;
+
+ /* Use a simple TX queue without offloads or multi segs if possible */
+ i40e_set_tx_function_flag(dev, txq);
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+ if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+ txq->dcb_tc = i;
+ }
+
+ return 0;
+}
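+
+/*
+ * Worked example of the threshold constraints above, assuming the defaults
+ * DEFAULT_TX_RS_THRESH = 32 and DEFAULT_TX_FREE_THRESH = 32 with
+ * nb_desc = 512: 32 < 512 - 2, 32 < 512 - 3, 32 <= 32 and 512 % 32 == 0,
+ * so leaving both fields 0 in tx_conf is a valid configuration.
+ */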
+
+void
+i40e_dev_tx_queue_release(void *txq)
+{
+ struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
+
+ if (!q) {
+ PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
+ return;
+ }
+
+ i40e_tx_queue_release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_free(q);
+}
+
+const struct rte_memzone *
+i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(name);
+ if (mz)
+ return mz;
+
+ if (rte_xen_dom0_supported())
+ mz = rte_memzone_reserve_bounded(name, len,
+ socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
+ else
+ mz = rte_memzone_reserve_aligned(name, len,
+ socket_id, 0, I40E_RING_BASE_ALIGN);
+ return mz;
+}
+
+void
+i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
+{
+ uint16_t i;
+
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ i40e_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (rxq->rx_nb_avail == 0)
+ return;
+ for (i = 0; i < rxq->rx_nb_avail; i++) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mbuf);
+ }
+ rxq->rx_nb_avail = 0;
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+}
+
+void
+i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
+{
+ unsigned i;
+ uint16_t len;
+
+ if (!rxq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+ return;
+ }
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+ len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST);
+ else
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ len = rxq->nb_rx_desc;
+
+ for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+ for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
+ rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+}
+
+void
+i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
+{
+ uint16_t i;
+
+ if (!txq || !txq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+void
+i40e_reset_tx_queue(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txe;
+ uint16_t i, prev, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+}
+
+/* Init the TX queue in hardware */
+int
+i40e_tx_queue_init(struct i40e_tx_queue *txq)
+{
+ enum i40e_status_code err = I40E_SUCCESS;
+ struct i40e_vsi *vsi = txq->vsi;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t pf_q = txq->reg_idx;
+ struct i40e_hmc_obj_txq tx_ctx;
+ uint32_t qtx_ctl;
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+ tx_ctx.new_context = 1;
+ tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.qlen = txq->nb_tx_desc;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ tx_ctx.timesync_ena = 1;
+#endif
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]);
+ if (vsi->type == I40E_VSI_FDIR)
+ tx_ctx.fd_ena = TRUE;
+
+ err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context");
+ return err;
+ }
+
+ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failure of set lan tx queue context");
+ return err;
+ }
+
+ /* Now associate this queue with this PCI function */
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
+ I40E_WRITE_REG(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+ I40E_WRITE_FLUSH(hw);
+
+ txq->qtx_tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+ return err;
+}
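+
+/* Worked example for the context programmed above (values assumed for
+ * illustration): the HMC stores the ring base in units of
+ * I40E_QUEUE_BASE_ADDR_UNIT (128 bytes), so a TX ring at physical address
+ * 0x10000 would be programmed as base = 0x10000 / 128 = 0x200.
+ */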
+
+int
+i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
+{
+ struct i40e_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile union i40e_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
+
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+
+ rxd = &rxq->rx_ring[i];
+ rxd->read.pkt_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rxd->read.rsvd1 = 0;
+ rxd->read.rsvd2 = 0;
+#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
+
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate the buffer length, and check the jumbo frame
+ * and maximum packet length.
+ */
+static int
+i40e_rx_queue_config(struct i40e_rx_queue *rxq)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct rte_eth_dev_data *data = pf->dev_data;
+ uint16_t buf_size, len;
+
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+
+ switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
+ I40E_FLAG_HEADER_SPLIT_ENABLED)) {
+ case I40E_FLAG_HEADER_SPLIT_ENABLED: /* Not supported */
+ rxq->rx_hdr_len = RTE_ALIGN(I40E_RXBUF_SZ_1024,
+ (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ rxq->rx_buf_len = RTE_ALIGN(I40E_RXBUF_SZ_2048,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ rxq->hs_mode = i40e_header_split_enabled;
+ break;
+ case I40E_FLAG_HEADER_SPLIT_DISABLED:
+ default:
+ rxq->rx_hdr_len = 0;
+ rxq->rx_buf_len = RTE_ALIGN(buf_size,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ rxq->hs_mode = i40e_header_split_none;
+ break;
+ }
+
+ len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
+ rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
+ if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must "
+ "be larger than %u and smaller than %u,"
+ "as jumbo frame is enabled",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
+ }
+ } else {
+ if (rxq->max_pkt_len < ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ return 0;
+}
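+
+/* Worked example of the sizing above (illustrative numbers): with a
+ * 2048-byte mempool data room and the default 128-byte headroom, buf_size
+ * is 1920; 1920 is already a multiple of 1 << I40E_RXQ_CTX_DBUFF_SHIFT
+ * (128), so rx_buf_len stays 1920. Assuming an rx_buf_chain_len of 5, len
+ * is 9600, and a configured max_rx_pkt_len of 9000 gives max_pkt_len =
+ * 9000, which is only accepted when jumbo frames are enabled.
+ */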
+
+/* Init the RX queue in hardware */
+int
+i40e_rx_queue_init(struct i40e_rx_queue *rxq)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
+ uint16_t pf_q = rxq->reg_idx;
+ uint16_t buf_size;
+ struct i40e_hmc_obj_rxq rx_ctx;
+
+ err = i40e_rx_queue_config(rxq);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Failed to config RX queue");
+ return err;
+ }
+
+ /* Clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rx_ctx.dbuff = rxq->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = rxq->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->nb_rx_desc;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+ rx_ctx.dtype = rxq->hs_mode;
+ if (rxq->hs_mode)
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
+ else
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.rxmax = rxq->max_pkt_len;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
+ rx_ctx.l2tsel = 1;
+ /* showiv indicates whether the inner VLAN is stripped inside a
+ * tunnel packet. When set to 1, VLAN information is stripped from
+ * the inner header, but the hardware does not put it in the
+ * descriptor, so leave it zero by default.
+ */
+ rx_ctx.showiv = 0;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context");
+ return err;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context");
+ return err;
+ }
+
+ rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+
+ /* Check if scattered RX needs to be used. */
+ if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ return 0;
+}
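+
+/* Example of the scattered-RX check above (illustrative numbers): with
+ * max_pkt_len = 9000, buf_size = 1920 and I40E_VLAN_TAG_SIZE = 4,
+ * 9000 + 2 * 4 = 9008 exceeds 1920, so scattered_rx is switched on and a
+ * received frame may span several mbufs.
+ */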
+
+void
+i40e_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
+ i40e_reset_tx_queue(dev->data->tx_queues[i]);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
+ i40e_reset_rx_queue(dev->data->rx_queues[i]);
+ }
+}
+
+void
+i40e_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
+#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC
+
+enum i40e_status_code
+i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
+{
+ struct i40e_tx_queue *txq;
+ const struct rte_memzone *tz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev;
+
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return I40E_ERR_BAD_PTR;
+ }
+
+ dev = pf->adapter->eth_dev;
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("i40e fdir tx queue",
+ sizeof(struct i40e_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
+ if (!tz) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
+ txq->queue_id = I40E_FDIR_QUEUE_ID;
+ txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ txq->vsi = pf->fdir.fdir_vsi;
+
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+ /*
+ * No software ring or reset is needed for the FDIR program
+ * queue; just mark the queue as configured.
+ */
+ txq->q_set = TRUE;
+ pf->fdir.txq = txq;
+
+ return I40E_SUCCESS;
+}
+
+enum i40e_status_code
+i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
+{
+ struct i40e_rx_queue *rxq;
+ const struct rte_memzone *rz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev;
+
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return I40E_ERR_BAD_PTR;
+ }
+
+ dev = pf->adapter->eth_dev;
+
+ /* Allocate the RX queue data structure. */
+ rxq = rte_zmalloc_socket("i40e fdir rx queue",
+ sizeof(struct i40e_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "rx queue structure.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Allocate RX hardware ring descriptors. */
+ ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
+ if (!rz) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
+ rxq->queue_id = I40E_FDIR_QUEUE_ID;
+ rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ rxq->vsi = pf->fdir.fdir_vsi;
+
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
+
+ /*
+ * No software ring or reset is needed for the FDIR RX queue;
+ * just mark the queue as configured.
+ */
+ rxq->q_set = TRUE;
+ pf->fdir.rxq = rxq;
+
+ return I40E_SUCCESS;
+}
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct i40e_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct i40e_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint16_t rx_using_sse, i;
+ /* Vector Rx is only allowed when a few configuration
+ * conditions are met and Rx bulk allocation is allowed.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+ !ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+ " Vector Rx preconditions",
+ dev->data->port_id);
+
+ ad->rx_vec_allowed = false;
+ }
+ if (ad->rx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq =
+ dev->data->rx_queues[i];
+
+ if (i40e_rxq_vec_setup(rxq)) {
+ ad->rx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (dev->data->scattered_rx) {
+ /* Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ }
+ /* If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_I40E_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_vec;
+ } else if (ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rx_using_sse =
+ (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+ }
+ }
+}
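+
+/* Summary of the selection above (a restatement, not new logic):
+ * scattered_rx && rx_vec_allowed -> i40e_recv_scattered_pkts_vec
+ * scattered_rx -> i40e_recv_scattered_pkts
+ * rx_vec_allowed -> i40e_recv_pkts_vec
+ * rx_bulk_alloc_allowed -> i40e_recv_pkts_bulk_alloc
+ * otherwise -> i40e_recv_pkts
+ */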
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+ if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+ PMD_INIT_LOG(DEBUG, "Vector tx"
+ " can be enabled on this txq.");
+
+ } else {
+ ad->tx_vec_allowed = false;
+ }
+ } else {
+ ad->tx_simple_allowed = false;
+ }
+}
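+
+/* Example (illustrative values): a queue whose txq_flags cover
+ * I40E_SIMPLE_FLAGS (no offloads, no multi-segment) with tx_rs_thresh = 32
+ * satisfies tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST (32) and
+ * tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ (64), so both the simple and
+ * the vector TX paths remain allowed for it.
+ */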
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct i40e_tx_queue *txq =
+ dev->data->tx_queues[i];
+
+ if (i40e_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ }
+ } else {
+ PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ }
+}
+
+/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
+
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
+void __attribute__((weak))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return;
+}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx.h b/src/dpdk/drivers/net/i40e/i40e_rxtx.h
new file mode 100644
index 00000000..98179f00
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx.h
@@ -0,0 +1,258 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_RXTX_H_
+#define _I40E_RXTX_H_
+
+/**
+ * 32-bit TX flags: the high 16 bits carry L2TAG1 (VLAN),
+ * the low 16 bits carry the other flags.
+ */
+#define I40E_TX_FLAG_L2TAG1_SHIFT 16
+#define I40E_TX_FLAG_L2TAG1_MASK 0xffff0000
+#define I40E_TX_FLAG_CSUM ((uint32_t)(1 << 0))
+#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
+#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
+
+#define RTE_PMD_I40E_RX_MAX_BURST 32
+#define RTE_PMD_I40E_TX_MAX_BURST 32
+
+#define RTE_I40E_VPMD_RX_BURST 32
+#define RTE_I40E_VPMD_TX_BURST 32
+#define RTE_I40E_RXQ_REARM_THRESH 32
+#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH
+#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64
+#define RTE_I40E_DESCS_PER_LOOP 4
+
+#define I40E_RXBUF_SZ_1024 1024
+#define I40E_RXBUF_SZ_2048 2048
+
+/* In non-PXE mode, QLEN must be a whole multiple of 32 descriptors. */
+#define I40E_ALIGN_RING_DESC 32
+
+#define I40E_MIN_RING_DESC 64
+#define I40E_MAX_RING_DESC 4096
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
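+
+/* Usage sketch for the macro above (hypothetical struct, illustration
+ * only): recover the enclosing structure from a pointer to one member.
+ */
+#if 0
+struct example_wrapper {
+ int cookie;
+ int member;
+};
+
+static struct example_wrapper *
+example_from_member(int *p)
+{
+ /* steps back from &wrapper->member to the wrapper itself */
+ return container_of(p, struct example_wrapper, member);
+}
+#endif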
+
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
+enum i40e_header_split_mode {
+ i40e_header_split_none = 0,
+ i40e_header_split_enabled = 1,
+ i40e_header_split_always = 2,
+ i40e_header_split_reserved
+};
+
+#define I40E_HEADER_SPLIT_NONE ((uint8_t)0)
+#define I40E_HEADER_SPLIT_L2 ((uint8_t)(1 << 0))
+#define I40E_HEADER_SPLIT_IP ((uint8_t)(1 << 1))
+#define I40E_HEADER_SPLIT_UDP_TCP ((uint8_t)(1 << 2))
+#define I40E_HEADER_SPLIT_SCTP ((uint8_t)(1 << 3))
+#define I40E_HEADER_SPLIT_ALL (I40E_HEADER_SPLIT_L2 | \
+ I40E_HEADER_SPLIT_IP | \
+ I40E_HEADER_SPLIT_UDP_TCP | \
+ I40E_HEADER_SPLIT_SCTP)
+
+/* HW desc structure, both 16-byte and 32-byte types are supported */
+#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#define i40e_rx_desc i40e_16byte_rx_desc
+#else
+#define i40e_rx_desc i40e_32byte_rx_desc
+#endif
+
+struct i40e_rx_entry {
+ struct rte_mbuf *mbuf;
+};
+
+/*
+ * Structure associated with each RX queue.
+ */
+struct i40e_rx_queue {
+ struct rte_mempool *mp; /**< mbuf pool to populate RX ring */
+ volatile union i40e_rx_desc *rx_ring;/**< RX ring virtual address */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address */
+ struct i40e_rx_entry *sw_ring; /**< address of RX soft ring */
+ uint16_t nb_rx_desc; /**< number of RX descriptors */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold */
+ uint16_t rx_tail; /**< current value of tail */
+ uint16_t nb_rx_hold; /**< number of held free RX desc */
+ struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
+ struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ uint16_t rx_nb_avail; /**< number of staged packets ready */
+ uint16_t rx_next_avail; /**< index of next staged packets */
+ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+ struct rte_mbuf fake_mbuf; /**< dummy mbuf */
+ struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
+#endif
+
+ uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+
+ uint8_t port_id; /**< device port ID */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
+ uint16_t queue_id; /**< RX queue index */
+ uint16_t reg_idx; /**< RX queue register index */
+ uint8_t drop_en; /**< if not 0, set register bit */
+ volatile uint8_t *qrx_tail; /**< register address of tail */
+ struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
+ uint16_t rx_buf_len; /* The packet buffer size */
+ uint16_t rx_hdr_len; /* The header buffer size */
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint8_t hs_mode; /* Header Split mode */
+ bool q_set; /**< indicate if rx queue has been configured */
+ bool rx_deferred_start; /**< don't start this queue in dev start */
+ uint16_t rx_using_sse; /**< flag indicating use of the vector PMD for RX */
+ uint8_t dcb_tc; /**< Traffic class of rx queue */
+};
+
+struct i40e_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+/*
+ * Structure associated with each TX queue.
+ */
+struct i40e_tx_queue {
+ uint16_t nb_tx_desc; /**< number of TX descriptors */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address */
+ volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */
+ struct i40e_tx_entry *sw_ring; /**< virtual address of SW ring */
+ uint16_t tx_tail; /**< current value of tail register */
+ volatile uint8_t *qtx_tail; /**< register address of tail */
+ uint16_t nb_tx_used; /**< number of TX desc used since RS bit set */
+ /**< index to last TX descriptor to have been cleaned */
+ uint16_t last_desc_cleaned;
+ /**< Total number of TX descriptors ready to be allocated. */
+ uint16_t nb_tx_free;
+ /**< Start freeing TX buffers if there are less free descriptors than
+ this value. */
+ uint16_t tx_free_thresh;
+ /** Number of TX descriptors to use before RS bit is set. */
+ uint16_t tx_rs_thresh;
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold reg. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx;
+ uint32_t txq_flags;
+ struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
+ uint16_t tx_next_dd;
+ uint16_t tx_next_rs;
+ bool q_set; /**< indicate if tx queue has been configured */
+ bool tx_deferred_start; /**< don't start this queue in dev start */
+ uint8_t dcb_tc; /**< Traffic class of tx queue */
+};
+
+/** Offload features */
+union i40e_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t outer_l2_len:8; /**< outer L2 Header Length */
+ uint64_t outer_l3_len:16; /**< outer L3 Header Length */
+ };
+};
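+
+/* Sketch (illustrative values): describing a plain TCP/IPv4 frame packs
+ * all header lengths into one 64-bit word via this union.
+ */
+#if 0
+static uint64_t
+example_tx_offload_word(void)
+{
+ union i40e_tx_offload off = { .data = 0 };
+
+ off.l2_len = 14; /* Ethernet header */
+ off.l3_len = 20; /* IPv4 header, no options */
+ off.l4_len = 20; /* TCP header, no options */
+ return off.data; /* single word handed to the TX path */
+}
+#endif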
+
+int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+const uint32_t *i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void i40e_dev_rx_queue_release(void *rxq);
+void i40e_dev_tx_queue_release(void *txq);
+uint16_t i40e_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int i40e_tx_queue_init(struct i40e_tx_queue *txq);
+int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
+void i40e_free_tx_resources(struct i40e_tx_queue *txq);
+void i40e_free_rx_resources(struct i40e_rx_queue *rxq);
+void i40e_dev_clear_queues(struct rte_eth_dev *dev);
+void i40e_dev_free_queues(struct rte_eth_dev *dev);
+void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
+void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
+void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
+void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
+
+uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
+int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
+void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void i40e_set_rx_function(struct rte_eth_dev *dev);
+void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq);
+void i40e_set_tx_function(struct rte_eth_dev *dev);
+
+#endif /* _I40E_RXTX_H_ */
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx_vec.c b/src/dpdk/drivers/net/i40e/i40e_rxtx_vec.c
new file mode 100644
index 00000000..05cb415e
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx_vec.c
@@ -0,0 +1,761 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with the pkt template.
+ * The data to be rearmed is 6 bytes long.
+ * RX will overwrite the ol_flags that come next anyway, so
+ * overwrite the whole 8 bytes with one store:
+ * 6 bytes of rearm_data plus the first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
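+
+/* Note on the tail write above (explanatory, no new behaviour): the tail
+ * is left one descriptor behind rxrearm_start; e.g. with nb_rx_desc = 512
+ * and rxrearm_start wrapping to 0, rx_id becomes 511, so the hardware
+ * never owns the descriptor that software will rearm next.
+ */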
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i vlan0, vlan1, rss;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3804, 0x3804, 0x3804, 0x3804);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0);
+
+ vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi16(vlan1, 11);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vol.dword = _mm_cvtsi128_si64(vlan0);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
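+
+/* Breakdown of the 0x3804 mask above (derived from the in-function
+ * comment): bit 2 (VLAN) + bit 11 (FDIR) + bits 13:12 (RSS) =
+ * 0x0004 + 0x0800 + 0x3000 = 0x3804, applied to each 16-bit lane.
+ */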
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT 10
+
+ /*
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
+ * DD bits are scanned
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /*pkt_type set as unknown */
+ );
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packets' descriptors in one loop
+ * [A*. mask out 4 unused dirty fields in desc]
+ * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < RTE_I40E_VPMD_RX_BURST;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ __m128i descs[RTE_I40E_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+
+ /* B.1 load 2 mbuf pointers */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+ /* B.2 copy 2 mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 2 mbuf pointers */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ /* A.1 load 2 pkts desc */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+ /* B.2 copy 2 mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
+ const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
+ descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
+ const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
+ descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order: the count of DD
+ * bits doesn't care, but end-of-packet tracking does, so
+ * shuffle. This also compresses the 32-bit values to
+ * 8-bit.
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /*
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
+ * DD bits are scanned
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ end = secondlast;
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
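+
+/* Worked CRC example for the path above (illustrative): with crc_len = 4
+ * and a final segment holding only 2 bytes, the CRC spans two mbufs; the
+ * last segment is freed and the remaining 4 - 2 = 2 CRC bytes are trimmed
+ * from the segment that is now last.
+ */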
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
+ * DD bits are scanned
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
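+
+/* Worked example for vtx1() above (illustrative values): a 60-byte
+ * single-segment packet with flags = I40E_TD_CMD produces
+ * high_qw = I40E_TX_DESC_DTYPE_DATA
+ * | (I40E_TD_CMD << I40E_TXD_QW1_CMD_SHIFT)
+ * | ((uint64_t)60 << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+ * while the low quadword is simply the DMA address of the packet data
+ * (buf_physaddr + data_off).
+ */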
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid running past the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
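+
+/* Sketch of what mbuf_initializer encodes (assumes the rearm_data marker
+ * of rte_mbuf covers data_off, refcnt, nb_segs and port): a single 64-bit
+ * store re-initializes all of them at once, as i40e_rxq_rearm() does.
+ */
+#if 0
+static void
+example_rearm_one(struct i40e_rx_queue *rxq, struct rte_mbuf *m)
+{
+ uintptr_t p = (uintptr_t)&m->rearm_data;
+
+ /* equivalent to the per-mbuf template store in i40e_rxq_rearm() */
+ *(uint64_t *)p = rxq->mbuf_initializer;
+}
+#endif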
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+ /* need SSE4.1 support */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ return -1;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+ /* without RX ol_flags, no VLAN flag can be reported */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}