Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/idpf/CMakeLists.txt        |   28
-rw-r--r--  src/plugins/idpf/README.rst            |   59
-rw-r--r--  src/plugins/idpf/cli.c                 |  135
-rw-r--r--  src/plugins/idpf/device.c              | 2265
-rw-r--r--  src/plugins/idpf/format.c              |   77
-rw-r--r--  src/plugins/idpf/idpf.api              |   80
-rw-r--r--  src/plugins/idpf/idpf.h                |  929
-rw-r--r--  src/plugins/idpf/idpf_api.c            |  111
-rw-r--r--  src/plugins/idpf/idpf_controlq.c       |  890
-rw-r--r--  src/plugins/idpf/idpf_test.c           |  169
-rw-r--r--  src/plugins/idpf/plugin.c              |   34
-rw-r--r--  src/plugins/idpf/virtchnl2.h           |  855
-rw-r--r--  src/plugins/idpf/virtchnl2_lan_desc.h  |  610
13 files changed, 6242 insertions(+), 0 deletions(-)
diff --git a/src/plugins/idpf/CMakeLists.txt b/src/plugins/idpf/CMakeLists.txt
new file mode 100644
index 00000000000..1c7e5ec619c
--- /dev/null
+++ b/src/plugins/idpf/CMakeLists.txt
@@ -0,0 +1,28 @@
+# Copyright (c) 2023 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(idpf
+ SOURCES
+ cli.c
+ device.c
+ format.c
+ plugin.c
+ idpf_controlq.c
+ idpf_api.c
+
+ API_FILES
+ idpf.api
+
+ API_TEST_SOURCES
+ idpf_test.c
+)
diff --git a/src/plugins/idpf/README.rst b/src/plugins/idpf/README.rst
new file mode 100644
index 00000000000..7d4a6b93f3a
--- /dev/null
+++ b/src/plugins/idpf/README.rst
@@ -0,0 +1,59 @@
+Intel IDPF device driver
+========================
+
+Overview
+--------
+
+This plugin provides native device support for the Intel Infrastructure
+Data Path Function (IDPF). IDPF is currently a driver specification for
+future Intel Physical Function devices. IDPF defines the communication
+channel between the Data Plane (DP) and the Control Plane (CP).
+
+Prerequisites
+-------------
+
+- The driver requires MSI-X interrupt support, which is not provided by
+  the uio_pci_generic driver, so vfio-pci needs to be used. On systems
+  without an IOMMU, the vfio driver can still be used with recent kernels
+  which support no-iommu mode.
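+
+  For example, a minimal setup for no-iommu mode might look like this
+  (assuming a recent kernel; module parameter paths can vary):
+
+  ::
+
+     modprobe vfio-pci
+     echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode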
+
+Known issues
+------------
+
+- This driver is still in the experimental phase, and the corresponding
+  device has not been released yet.
+
+- The current version only supports device initialization. Basic I/O
+  functions will be supported in the next release.
+
+Usage
+-----
+
+Interface Creation
+~~~~~~~~~~~~~~~~~~
+
+Interfaces can be created dynamically using the following CLI:
+
+::
+
+ create interface idpf 0000:4b:00.0 vport-num 1 rx-single 1 tx-single 1
+ set int state idpf-0/4b/0/0 up
+
+vport-num: number of vports to be created. Each vport corresponds to one netdev.
+rx-single: configure the Rx queue mode; split queue mode is used by default.
+tx-single: configure the Tx queue mode; split queue mode is used by default.
+
+Interface Deletion
+~~~~~~~~~~~~~~~~~~
+
+An interface can be deleted with the following CLI:
+
+::
+
+ delete interface idpf <interface name>
+
+Interface Statistics
+~~~~~~~~~~~~~~~~~~~~
+
+Interface statistics can be displayed with the
+``sh hardware-interface <if-name>`` command.
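+
+For example, for the interface created above:
+
+::
+
+   sh hardware-interface idpf-0/4b/0/0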
diff --git a/src/plugins/idpf/cli.c b/src/plugins/idpf/cli.c
new file mode 100644
index 00000000000..592c2612c97
--- /dev/null
+++ b/src/plugins/idpf/cli.c
@@ -0,0 +1,135 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#include <idpf/idpf.h>
+
+static clib_error_t *
+idpf_create_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ idpf_create_if_args_t args;
+ u32 tmp;
+
+ clib_memset (&args, 0, sizeof (idpf_create_if_args_t));
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%U", unformat_vlib_pci_addr, &args.addr))
+ ;
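+      /* rx-single/tx-single select single queue mode; the numeric value is
+         parsed but currently ignored */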
+ else if (unformat (line_input, "rx-single %u", &tmp))
+ args.rxq_single = 1;
+ else if (unformat (line_input, "tx-single %u", &tmp))
+ args.txq_single = 1;
+ else if (unformat (line_input, "rxq-num %u", &tmp))
+ args.rxq_num = tmp;
+ else if (unformat (line_input, "txq-num %u", &tmp))
+ args.txq_num = tmp;
+ else if (unformat (line_input, "rxq-size %u", &tmp))
+ args.rxq_size = tmp;
+ else if (unformat (line_input, "txq-size %u", &tmp))
+ args.txq_size = tmp;
+ else if (unformat (line_input, "vport-num %u", &tmp))
+ args.req_vport_nb = tmp;
+ else if (unformat (line_input, "name %s", &args.name))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ idpf_create_if (vm, &args);
+
+ vec_free (args.name);
+
+ return args.error;
+}
+
+VLIB_CLI_COMMAND (idpf_create_command, static) = {
+ .path = "create interface idpf",
+  .short_help = "create interface idpf <pci-address> [vport-num <n>] "
+		"[rx-single <0|1>] [tx-single <0|1>] [rxq-num <n>] "
+		"[txq-num <n>] [rxq-size <size>] [txq-size <size>] "
+		"[name <name>]",
+ .function = idpf_create_command_fn,
+};
+
+static clib_error_t *
+idpf_delete_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 sw_if_index = ~0;
+ vnet_hw_interface_t *hw;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "sw_if_index %d", &sw_if_index))
+ ;
+ else if (unformat (line_input, "%U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0,
+ "please specify interface name or sw_if_index");
+
+ hw = vnet_get_sup_hw_interface_api_visible_or_null (vnm, sw_if_index);
+ if (hw == NULL || idpf_device_class.index != hw->dev_class_index)
+ return clib_error_return (0, "not an IDPF interface");
+
+ vlib_process_signal_event (vm, idpf_process_node.index,
+ IDPF_PROCESS_EVENT_DELETE_IF, hw->dev_instance);
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (idpf_delete_command, static) = {
+ .path = "delete interface idpf",
+ .short_help = "delete interface idpf "
+ "{<interface> | sw_if_index <sw_idx>}",
+ .function = idpf_delete_command_fn,
+ .is_mp_safe = 1,
+};
+
+clib_error_t *
+idpf_cli_init (vlib_main_t *vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (idpf_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/idpf/device.c b/src/plugins/idpf/device.c
new file mode 100644
index 00000000000..44b8116d996
--- /dev/null
+++ b/src/plugins/idpf/device.c
@@ -0,0 +1,2265 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <idpf/idpf.h>
+#include <vpp/app/version.h>
+#include <vnet/plugin/plugin.h>
+
+#define IDPF_RXQ_SZ 512
+#define IDPF_TXQ_SZ 512
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_IDPF_PF 0x1452
+#define PCI_DEVICE_ID_INTEL_IDPF_VF 0x1889
+
+VLIB_REGISTER_LOG_CLASS (idpf_log) = {
+ .class_name = "idpf",
+};
+
+VLIB_REGISTER_LOG_CLASS (idpf_stats_log) = {
+ .class_name = "idpf",
+ .subclass_name = "stats",
+};
+
+idpf_main_t idpf_main;
+void idpf_delete_if (vlib_main_t *vm, idpf_device_t *id, int with_barrier);
+
+static pci_device_id_t idpf_pci_device_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = PCI_DEVICE_ID_INTEL_IDPF_PF },
+ { .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = PCI_DEVICE_ID_INTEL_IDPF_VF },
+ { 0 },
+};
+
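+/* Drain completed messages from the mailbox send queue and free the DMA
+   buffers attached to them. */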
+static int
+idpf_vc_clean (vlib_main_t *vm, idpf_device_t *id)
+{
+ idpf_ctlq_msg_t *q_msg[IDPF_CTLQ_LEN];
+ uint16_t num_q_msg = IDPF_CTLQ_LEN;
+ idpf_dma_mem_t *dma_mem;
+ uint32_t i;
+ int err;
+
+ for (i = 0; i < 10; i++)
+ {
+ err = idpf_ctlq_clean_sq (id->asq, &num_q_msg, q_msg);
+ vlib_process_suspend (vm, 0.02);
+ if (num_q_msg > 0)
+ break;
+ }
+ if (err != 0)
+ return err;
+
+ /* Empty queue is not an error */
+ for (i = 0; i < num_q_msg; i++)
+ {
+ dma_mem = q_msg[i]->ctx.indirect.payload;
+ if (dma_mem != NULL)
+ idpf_free_dma_mem (id, dma_mem);
+ clib_mem_free (q_msg[i]);
+ }
+
+ return 0;
+}
+
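+/* Read one message from the mailbox receive queue into buf and classify it
+   as a system event, a command reply or an error. */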
+static idpf_vc_result_t
+idpf_read_msg_from_cp (idpf_device_t *id, u16 buf_len, u8 *buf)
+{
+ idpf_ctlq_msg_t ctlq_msg;
+ idpf_dma_mem_t *dma_mem = NULL;
+ idpf_vc_result_t result = IDPF_MSG_NON;
+ u32 opcode;
+ u16 pending = 1;
+ int ret;
+
+ ret = idpf_ctlq_recv (id->arq, &pending, &ctlq_msg);
+ if (ret != 0)
+ {
+ idpf_log_debug (id, "Can't read msg from AQ");
+ if (ret != -ENOMSG)
+ result = IDPF_MSG_ERR;
+ return result;
+ }
+
+ clib_memcpy_fast (buf, ctlq_msg.ctx.indirect.payload->va, buf_len);
+
+ opcode = ctlq_msg.cookie.mbx.chnl_opcode;
+ id->cmd_retval = ctlq_msg.cookie.mbx.chnl_retval;
+
+ idpf_log_debug (id, "CQ from CP carries opcode %u, retval %d", opcode,
+ id->cmd_retval);
+
+ if (opcode == VIRTCHNL2_OP_EVENT)
+ {
+ virtchnl2_event_t *ve =
+ (virtchnl2_event_t *) ctlq_msg.ctx.indirect.payload->va;
+
+ result = IDPF_MSG_SYS;
+ switch (ve->event)
+ {
+ case VIRTCHNL2_EVENT_LINK_CHANGE:
+ break;
+ default:
+ idpf_log_err (id, "%s: Unknown event %d from CP", __func__,
+ ve->event);
+ break;
+ }
+ }
+ else
+ {
+      /* async reply to a command previously issued to the PF */
+ result = IDPF_MSG_CMD;
+ if (opcode != id->pend_cmd)
+ {
+ idpf_log_warn (id, "command mismatch, expect %u, get %u",
+ id->pend_cmd, opcode);
+ result = IDPF_MSG_ERR;
+ }
+ }
+
+ if (ctlq_msg.data_len != 0)
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ else
+ pending = 0;
+
+ ret = idpf_ctlq_post_rx_buffs (id, id->arq, &pending, &dma_mem);
+ if (ret != 0 && dma_mem != NULL)
+ idpf_free_dma_mem (id, dma_mem);
+
+ return result;
+}
+
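+/* Post a virtchnl2 message to the PF: copy the request into a freshly
+   allocated DMA buffer and enqueue it on the mailbox send queue. */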
+clib_error_t *
+idpf_send_vc_msg (vlib_main_t *vm, idpf_device_t *id, virtchnl2_op_t op,
+ u8 *in, u16 in_len)
+{
+ idpf_ctlq_msg_t *ctlq_msg;
+ idpf_dma_mem_t *dma_mem;
+ int error = 0;
+
+ error = idpf_vc_clean (vm, id);
+ if (error)
+ goto err;
+
+ ctlq_msg = clib_mem_alloc (sizeof (idpf_ctlq_msg_t));
+ if (ctlq_msg == NULL)
+ goto err;
+ clib_memset (ctlq_msg, 0, sizeof (idpf_ctlq_msg_t));
+
+ dma_mem = clib_mem_alloc (sizeof (idpf_dma_mem_t));
+ if (dma_mem == NULL)
+ goto dma_mem_error;
+ clib_memset (dma_mem, 0, sizeof (idpf_dma_mem_t));
+
+ dma_mem->va = idpf_alloc_dma_mem (vm, id, dma_mem, IDPF_DFLT_MBX_BUF_SIZE);
+ if (dma_mem->va == NULL)
+ {
+ clib_mem_free (dma_mem);
+ goto err;
+ }
+
+ clib_memcpy (dma_mem->va, in, in_len);
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;
+ ctlq_msg->func_id = 0;
+ ctlq_msg->data_len = in_len;
+ ctlq_msg->cookie.mbx.chnl_opcode = op;
+ ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL2_STATUS_SUCCESS;
+ ctlq_msg->ctx.indirect.payload = dma_mem;
+
+ error = idpf_ctlq_send (id, id->asq, 1, ctlq_msg);
+ if (error)
+ goto send_error;
+
+ return 0;
+
+send_error:
+ idpf_free_dma_mem (id, dma_mem);
+dma_mem_error:
+ clib_mem_free (ctlq_msg);
+err:
+ return clib_error_return (0, "idpf send vc msg to PF failed");
+}
+
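+/* Poll the mailbox until a command reply is received or the retry limit is
+   reached. */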
+clib_error_t *
+idpf_read_one_msg (vlib_main_t *vm, idpf_device_t *id, u32 ops, u8 *buf,
+ u16 buf_len)
+{
+ int i = 0, ret;
+ f64 suspend_time = IDPF_SEND_TO_PF_SUSPEND_TIME;
+
+ do
+ {
+ ret = idpf_read_msg_from_cp (id, buf_len, buf);
+ if (ret == IDPF_MSG_CMD)
+ break;
+ vlib_process_suspend (vm, suspend_time);
+ }
+ while (i++ < IDPF_SEND_TO_PF_MAX_TRY_TIMES);
+ if (i >= IDPF_SEND_TO_PF_MAX_TRY_TIMES ||
+ id->cmd_retval != VIRTCHNL2_STATUS_SUCCESS)
+ return clib_error_return (0, "idpf read one msg failed");
+
+ return 0;
+}
+
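+/* Send a virtchnl2 command and wait for its completion; for most init ops
+   the reply payload is copied into args->out_buffer. */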
+clib_error_t *
+idpf_execute_vc_cmd (vlib_main_t *vm, idpf_device_t *id, idpf_cmd_info_t *args)
+{
+ clib_error_t *error = 0;
+ f64 suspend_time = IDPF_SEND_TO_PF_SUSPEND_TIME;
+ int i = 0;
+
+ if (id->pend_cmd == VIRTCHNL2_OP_UNKNOWN)
+ id->pend_cmd = args->ops;
+ else
+ return clib_error_return (0, "There is incomplete cmd %d", id->pend_cmd);
+
+ if ((error = idpf_send_vc_msg (vm, id, args->ops, args->in_args,
+ args->in_args_size)))
+ return error;
+
+ switch (args->ops)
+ {
+ case VIRTCHNL2_OP_VERSION:
+ case VIRTCHNL2_OP_GET_CAPS:
+ case VIRTCHNL2_OP_CREATE_VPORT:
+ case VIRTCHNL2_OP_DESTROY_VPORT:
+ case VIRTCHNL2_OP_SET_RSS_KEY:
+ case VIRTCHNL2_OP_SET_RSS_LUT:
+ case VIRTCHNL2_OP_SET_RSS_HASH:
+ case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
+ case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
+ case VIRTCHNL2_OP_ENABLE_QUEUES:
+ case VIRTCHNL2_OP_DISABLE_QUEUES:
+ case VIRTCHNL2_OP_ENABLE_VPORT:
+ case VIRTCHNL2_OP_DISABLE_VPORT:
+ case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+ case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+ case VIRTCHNL2_OP_ALLOC_VECTORS:
+ case VIRTCHNL2_OP_DEALLOC_VECTORS:
+ case VIRTCHNL2_OP_GET_STATS:
+ /* for init virtchnl ops, need to poll the response */
+ error = idpf_read_one_msg (vm, id, args->ops, args->out_buffer,
+ args->out_size);
+ if (error)
+ return clib_error_return (0, "idpf read vc message from PF failed");
+ clear_cmd (id);
+ break;
+ case VIRTCHNL2_OP_GET_PTYPE_INFO:
+ break;
+ default:
+ do
+ {
+ if (id->pend_cmd == VIRTCHNL2_OP_UNKNOWN)
+ break;
+ vlib_process_suspend (vm, suspend_time);
+	  /* if no reply was read, or only a sys event was read, keep polling */
+ }
+ while (i++ < IDPF_SEND_TO_PF_MAX_TRY_TIMES);
+      /* if no response is received or the command failed, return an error */
+ if (i >= IDPF_SEND_TO_PF_MAX_TRY_TIMES ||
+ id->cmd_retval != VIRTCHNL2_STATUS_SUCCESS)
+ return clib_error_return (
+ 0, "No response or return failure (%d) for cmd %d", id->cmd_retval,
+ args->ops);
+ break;
+ }
+
+ return error;
+}
+
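+/* Return the bus address to program into the device: the VA when VA DMA is
+   supported, the PA otherwise. */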
+static inline uword
+idpf_dma_addr (vlib_main_t *vm, idpf_device_t *id, void *p)
+{
+ return (id->flags & IDPF_DEVICE_F_VA_DMA) ? pointer_to_uword (p) :
+ vlib_physmem_get_pa (vm, p);
+}
+
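+/* Map or unmap all rx queues of a vport to their interrupt vectors via
+   VIRTCHNL2_OP_(UN)MAP_QUEUE_VECTOR. */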
+clib_error_t *
+idpf_vc_config_irq_map_unmap (vlib_main_t *vm, idpf_device_t *id,
+ idpf_vport_t *vport, bool map)
+{
+ virtchnl2_queue_vector_maps_t *map_info;
+ virtchnl2_queue_vector_t *vecmap;
+ u16 nb_rxq = vport->id->n_rx_queues;
+ idpf_cmd_info_t args;
+ clib_error_t *error;
+ int len, i;
+
+ len = sizeof (virtchnl2_queue_vector_maps_t) +
+ (nb_rxq - 1) * sizeof (virtchnl2_queue_vector_t);
+
+ map_info = clib_mem_alloc_aligned (len, CLIB_CACHE_LINE_BYTES);
+ clib_memset (map_info, 0, len);
+
+ map_info->vport_id = vport->vport_id;
+ map_info->num_qv_maps = nb_rxq;
+ for (i = 0; i < nb_rxq; i++)
+ {
+ vecmap = &map_info->qv_maps[i];
+ vecmap->queue_id = vport->qv_map[i].queue_id;
+ vecmap->vector_id = vport->qv_map[i].vector_id;
+ vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
+ vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
+ }
+
+ args.ops =
+ map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR : VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+ args.in_args = (u8 *) map_info;
+ args.in_args_size = len;
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+  error = idpf_execute_vc_cmd (vm, id, &args);
+  clib_mem_free (map_info);
+  if (error != 0)
+    return clib_error_return (
+      0, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
+      map ? "MAP" : "UNMAP");
+
+  return error;
+}
+
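+/* Program the dynamic interrupt throttling register and map every rx queue
+   of the vport to the first received vector. */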
+clib_error_t *
+idpf_config_rx_queues_irqs (vlib_main_t *vm, idpf_device_t *id,
+ idpf_vport_t *vport)
+{
+ virtchnl2_queue_vector_t *qv_map;
+ clib_error_t *error = 0;
+ u32 dynctl_reg_start;
+ u32 itrn_reg_start;
+ u32 dynctl_val, itrn_val;
+ int i;
+
+ qv_map = clib_mem_alloc_aligned (id->n_rx_queues *
+ sizeof (virtchnl2_queue_vector_t),
+ CLIB_CACHE_LINE_BYTES);
+ clib_memset (qv_map, 0, id->n_rx_queues * sizeof (virtchnl2_queue_vector_t));
+
+ dynctl_reg_start = vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start = vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = idpf_reg_read (id, dynctl_reg_start);
+ idpf_log_debug (id, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = idpf_reg_read (id, itrn_reg_start);
+ idpf_log_debug (id, "Value of itrn_reg_start is 0x%x", itrn_val);
+
+ if (itrn_val != 0)
+ idpf_reg_write (id, dynctl_reg_start,
+ VIRTCHNL2_ITR_IDX_0 << PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S);
+ else
+ idpf_reg_write (id, dynctl_reg_start,
+ VIRTCHNL2_ITR_IDX_0 << PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ IDPF_DFLT_INTERVAL << PF_GLINT_DYN_CTL_INTERVAL_S);
+
+ for (i = 0; i < id->n_rx_queues; i++)
+ {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ if ((error = idpf_vc_config_irq_map_unmap (vm, id, vport, true)))
+ {
+ idpf_log_err (id, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return error;
+
+config_irq_map_err:
+ clib_mem_free (vport->qv_map);
+ vport->qv_map = NULL;
+
+ return error;
+}
+
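+/* Set up one rx buffer queue for split queue mode: allocate the descriptor
+   ring, pre-fill it with buffers and record its tail register. */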
+clib_error_t *
+idpf_rx_split_bufq_setup (vlib_main_t *vm, idpf_device_t *id,
+ idpf_vport_t *vport, idpf_rxq_t *bufq, u16 qid,
+ u16 rxq_size)
+{
+ clib_error_t *err;
+ u32 n_alloc, i;
+
+ bufq->size = rxq_size;
+ bufq->next = 0;
+ bufq->descs = vlib_physmem_alloc_aligned_on_numa (
+ vm, bufq->size * sizeof (virtchnl2_rx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
+ id->numa_node);
+
+  bufq->buffer_pool_index =
+    vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
+
+  if (bufq->descs == 0)
+    return vlib_physmem_last_error (vm);
+
+  if ((err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) bufq->descs)))
+    return err;
+
+ clib_memset ((void *) bufq->descs, 0,
+ bufq->size * sizeof (virtchnl2_rx_desc_t));
+ vec_validate_aligned (bufq->bufs, bufq->size, CLIB_CACHE_LINE_BYTES);
+ bufq->qrx_tail = id->bar0 + (vport->chunks_info.rx_buf_qtail_start +
+ qid * vport->chunks_info.rx_buf_qtail_spacing);
+
+ n_alloc = vlib_buffer_alloc_from_pool (vm, bufq->bufs, bufq->size - 8,
+ bufq->buffer_pool_index);
+ if (n_alloc == 0)
+ return clib_error_return (0, "buffer allocation error");
+
+ bufq->n_enqueued = n_alloc;
+ virtchnl2_rx_desc_t *d = bufq->descs;
+ for (i = 0; i < n_alloc; i++)
+ {
+ vlib_buffer_t *b = vlib_get_buffer (vm, bufq->bufs[i]);
+ if (id->flags & IDPF_DEVICE_F_VA_DMA)
+ d->qword[0] = vlib_buffer_get_va (b);
+ else
+ d->qword[0] = vlib_buffer_get_pa (vm, b);
+ d++;
+ }
+
+ return 0;
+}
+
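+/* Initialize one rx queue in split queue mode together with its two buffer
+   queues. */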
+clib_error_t *
+idpf_split_rxq_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid, u16 rxq_size)
+{
+ clib_error_t *err;
+ idpf_rxq_t *rxq;
+ u32 n_alloc, i;
+
+ vec_validate_aligned (vport->rxqs, qid, CLIB_CACHE_LINE_BYTES);
+ rxq = vec_elt_at_index (vport->rxqs, qid);
+ rxq->size = rxq_size;
+ rxq->next = 0;
+ rxq->descs = vlib_physmem_alloc_aligned_on_numa (
+ vm, rxq->size * sizeof (virtchnl2_rx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
+ id->numa_node);
+
+ rxq->buffer_pool_index =
+ vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
+
+ if (rxq->descs == 0)
+ return vlib_physmem_last_error (vm);
+
+ if ((err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) rxq->descs)))
+ return err;
+
+ clib_memset ((void *) rxq->descs, 0,
+ rxq->size * sizeof (virtchnl2_rx_desc_t));
+ vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
+ rxq->qrx_tail = id->bar0 + (vport->chunks_info.rx_qtail_start +
+ qid * vport->chunks_info.rx_qtail_spacing);
+
+ n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
+ rxq->buffer_pool_index);
+
+ if (n_alloc == 0)
+ return clib_error_return (0, "buffer allocation error");
+
+ rxq->n_enqueued = n_alloc;
+ virtchnl2_rx_desc_t *d = rxq->descs;
+ for (i = 0; i < n_alloc; i++)
+ {
+ vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
+ if (id->flags & IDPF_DEVICE_F_VA_DMA)
+ d->qword[0] = vlib_buffer_get_va (b);
+ else
+ d->qword[0] = vlib_buffer_get_pa (vm, b);
+ d++;
+ }
+
+ err =
+ idpf_rx_split_bufq_setup (vm, id, vport, rxq->bufq1, 2 * qid, rxq_size);
+ if (err)
+ return err;
+  err = idpf_rx_split_bufq_setup (vm, id, vport, rxq->bufq2, 2 * qid + 1,
+                                  rxq_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+clib_error_t *
+idpf_single_rxq_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid, u16 rxq_size)
+{
+ clib_error_t *err;
+ idpf_rxq_t *rxq;
+ u32 n_alloc, i;
+
+ vec_validate_aligned (vport->rxqs, qid, CLIB_CACHE_LINE_BYTES);
+ rxq = vec_elt_at_index (vport->rxqs, qid);
+ rxq->queue_index = vport->chunks_info.rx_start_qid + qid;
+ rxq->size = rxq_size;
+ rxq->next = 0;
+ rxq->descs = vlib_physmem_alloc_aligned_on_numa (
+ vm, rxq->size * sizeof (virtchnl2_rx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
+ id->numa_node);
+
+ rxq->buffer_pool_index =
+ vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
+
+ if (rxq->descs == 0)
+ return vlib_physmem_last_error (vm);
+
+ err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) rxq->descs);
+ if (err)
+ return err;
+
+ clib_memset ((void *) rxq->descs, 0,
+ rxq->size * sizeof (virtchnl2_rx_desc_t));
+ vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
+ rxq->qrx_tail = id->bar0 + (vport->chunks_info.rx_qtail_start +
+ qid * vport->chunks_info.rx_qtail_spacing);
+
+ n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
+ rxq->buffer_pool_index);
+
+ if (n_alloc == 0)
+ return clib_error_return (0, "buffer allocation error");
+
+ rxq->n_enqueued = n_alloc;
+ virtchnl2_rx_desc_t *d = rxq->descs;
+ for (i = 0; i < n_alloc; i++)
+ {
+ vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
+ if (id->flags & IDPF_DEVICE_F_VA_DMA)
+ d->qword[0] = vlib_buffer_get_va (b);
+ else
+ d->qword[0] = vlib_buffer_get_pa (vm, b);
+ d++;
+ }
+
+ return 0;
+}
+
+clib_error_t *
+idpf_rx_queue_setup (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid, u16 rxq_size)
+{
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ return idpf_single_rxq_init (vm, id, vport, qid, rxq_size);
+ else
+ return idpf_split_rxq_init (vm, id, vport, qid, rxq_size);
+}
+
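+/* Set up the tx completion queue used by a split-mode tx queue. */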
+clib_error_t *
+idpf_tx_split_complq_setup (vlib_main_t *vm, idpf_device_t *id,
+ idpf_vport_t *vport, idpf_txq_t *complq, u16 qid,
+ u16 txq_size)
+{
+ clib_error_t *err;
+ u16 n;
+ u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
+
+ complq->size = txq_size;
+ complq->next = 0;
+ clib_spinlock_init (&complq->lock);
+
+ n = (complq->size / 510) + 1;
+ vec_validate_aligned (complq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
+
+ if (!vlib_buffer_alloc_from_pool (vm, complq->ph_bufs, n, bpi))
+ return clib_error_return (0, "buffer allocation error");
+
+ complq->descs = vlib_physmem_alloc_aligned_on_numa (
+ vm, complq->size * sizeof (idpf_tx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
+ id->numa_node);
+ if (complq->descs == 0)
+ return vlib_physmem_last_error (vm);
+
+ if ((err =
+ vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) complq->descs)))
+ return err;
+
+ vec_validate_aligned (complq->bufs, complq->size, CLIB_CACHE_LINE_BYTES);
+ complq->qtx_tail =
+ id->bar0 + (vport->chunks_info.tx_compl_qtail_start +
+ qid * vport->chunks_info.tx_compl_qtail_spacing);
+
+ /* initialize ring of pending RS slots */
+ clib_ring_new_aligned (complq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
+
+ vec_validate_aligned (complq->tmp_descs, complq->size,
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (complq->tmp_bufs, complq->size, CLIB_CACHE_LINE_BYTES);
+
+ return 0;
+}
+
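+/* Initialize one tx queue in split queue mode together with its completion
+   queue. */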
+clib_error_t *
+idpf_split_txq_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid, u16 txq_size)
+{
+ clib_error_t *err;
+ idpf_txq_t *txq;
+ u16 n, complq_qid;
+ u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
+
+ vec_validate_aligned (vport->txqs, qid, CLIB_CACHE_LINE_BYTES);
+ txq = vec_elt_at_index (vport->txqs, qid);
+ txq->size = txq_size;
+ txq->next = 0;
+ clib_spinlock_init (&txq->lock);
+
+ n = (txq->size / 510) + 1;
+ vec_validate_aligned (txq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
+
+ if (!vlib_buffer_alloc_from_pool (vm, txq->ph_bufs, n, bpi))
+ return clib_error_return (0, "buffer allocation error");
+
+ txq->descs = vlib_physmem_alloc_aligned_on_numa (
+ vm, txq->size * sizeof (idpf_tx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
+ id->numa_node);
+ if (txq->descs == 0)
+ return vlib_physmem_last_error (vm);
+
+ err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) txq->descs);
+ if (err)
+ return err;
+
+ vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
+ txq->qtx_tail = id->bar0 + (vport->chunks_info.tx_qtail_start +
+ qid * vport->chunks_info.tx_qtail_spacing);
+
+ /* initialize ring of pending RS slots */
+ clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
+
+ vec_validate_aligned (txq->tmp_descs, txq->size, CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (txq->tmp_bufs, txq->size, CLIB_CACHE_LINE_BYTES);
+
+ complq_qid = vport->chunks_info.tx_compl_start_qid + qid;
+ err = idpf_tx_split_complq_setup (vm, id, vport, txq->complq, complq_qid,
+ 2 * txq_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+clib_error_t *
+idpf_single_txq_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid, u16 txq_size)
+{
+ clib_error_t *err;
+ idpf_txq_t *txq;
+ u16 n;
+ u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, id->numa_node);
+
+ vec_validate_aligned (vport->txqs, qid, CLIB_CACHE_LINE_BYTES);
+ txq = vec_elt_at_index (vport->txqs, qid);
+ txq->queue_index = vport->chunks_info.tx_start_qid + qid;
+ txq->size = txq_size;
+ txq->next = 0;
+ clib_spinlock_init (&txq->lock);
+
+ n = (txq->size / 510) + 1;
+ vec_validate_aligned (txq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
+
+ if (!vlib_buffer_alloc_from_pool (vm, txq->ph_bufs, n, bpi))
+ return clib_error_return (0, "buffer allocation error");
+
+ txq->descs = vlib_physmem_alloc_aligned_on_numa (
+ vm, txq->size * sizeof (idpf_tx_desc_t), 2 * CLIB_CACHE_LINE_BYTES,
+ id->numa_node);
+ if (txq->descs == 0)
+ return vlib_physmem_last_error (vm);
+
+ err = vlib_pci_map_dma (vm, id->pci_dev_handle, (void *) txq->descs);
+ if (err)
+ return err;
+
+ vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
+ txq->qtx_tail = id->bar0 + (vport->chunks_info.tx_qtail_start +
+ qid * vport->chunks_info.tx_qtail_spacing);
+
+ /* initialize ring of pending RS slots */
+ clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
+
+ vec_validate_aligned (txq->tmp_descs, txq->size, CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (txq->tmp_bufs, txq->size, CLIB_CACHE_LINE_BYTES);
+
+ return 0;
+}
+
+clib_error_t *
+idpf_tx_queue_setup (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid, u16 txq_size)
+{
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ return idpf_single_txq_init (vm, id, vport, qid, txq_size);
+ else
+ return idpf_split_txq_init (vm, id, vport, qid, txq_size);
+}
+
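+/* Send VIRTCHNL2_OP_CONFIG_TX_QUEUES for one tx queue (plus its completion
+   queue in split mode). */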
+clib_error_t *
+idpf_vc_config_txq (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid)
+{
+ idpf_txq_t *txq;
+ virtchnl2_config_tx_queues_t *vc_txqs = NULL;
+ virtchnl2_txq_info_t *txq_info;
+ idpf_cmd_info_t args;
+ clib_error_t *error;
+ u16 num_qs;
+ int size;
+
+ vec_validate_aligned (vport->txqs, qid, CLIB_CACHE_LINE_BYTES);
+ txq = vec_elt_at_index (vport->txqs, qid);
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ num_qs = IDPF_TXQ_PER_GRP;
+ else
+ num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
+
+ size = sizeof (*vc_txqs) + (num_qs - 1) * sizeof (virtchnl2_txq_info_t);
+ vc_txqs = clib_mem_alloc_aligned (size, CLIB_CACHE_LINE_BYTES);
+ clib_memset (vc_txqs, 0, size);
+
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ {
+ txq_info = &vc_txqs->qinfo[0];
+ txq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) txq->descs);
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info->queue_id = txq->queue_index;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+ txq_info->ring_len = txq->size;
+ }
+ else
+ {
+ /* txq info */
+ txq_info = &vc_txqs->qinfo[0];
+ txq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) txq->descs);
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info->queue_id = txq->queue_index;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ txq_info->ring_len = txq->size;
+ txq_info->tx_compl_queue_id = txq->complq->queue_index;
+ txq_info->relative_queue_id = txq_info->queue_id;
+
+ /* tx completion queue info */
+ idpf_txq_t *complq = txq->complq;
+ txq_info = &vc_txqs->qinfo[1];
+ txq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) complq->descs);
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info->queue_id = complq->queue_index;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ txq_info->ring_len = complq->size;
+ }
+
+ clib_memset (&args, 0, sizeof (args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (u8 *) vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ clib_mem_free (vc_txqs);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return error;
+}
+
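+/* Send VIRTCHNL2_OP_CONFIG_RX_QUEUES for one rx queue (plus its buffer
+   queues in split mode). */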
+clib_error_t *
+idpf_vc_config_rxq (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid)
+{
+ idpf_rxq_t *rxq;
+ virtchnl2_config_rx_queues_t *vc_rxqs = NULL;
+ virtchnl2_rxq_info_t *rxq_info;
+ idpf_cmd_info_t args;
+ clib_error_t *error;
+ u16 num_qs;
+ int size, i;
+
+ vec_validate_aligned (vport->rxqs, qid, CLIB_CACHE_LINE_BYTES);
+ rxq = vec_elt_at_index (vport->rxqs, qid);
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ num_qs = IDPF_RXQ_PER_GRP;
+ else
+ num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
+
+ size = sizeof (*vc_rxqs) + (num_qs - 1) * sizeof (virtchnl2_rxq_info_t);
+ vc_rxqs = clib_mem_alloc_aligned (size, CLIB_CACHE_LINE_BYTES);
+ clib_memset (vc_rxqs, 0, size);
+
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ {
+ rxq_info = &vc_rxqs->qinfo[0];
+ rxq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) rxq->descs);
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info->queue_id = rxq->queue_index;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ rxq_info->data_buffer_size = vlib_buffer_get_default_data_size (vm);
+ rxq_info->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
+
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+ rxq_info->ring_len = rxq->size;
+ }
+ else
+ {
+ /* Rx queue */
+ rxq_info = &vc_rxqs->qinfo[0];
+ rxq_info->dma_ring_addr = idpf_dma_addr (vm, id, (void *) rxq->descs);
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info->queue_id = rxq->queue_index;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ rxq_info->data_buffer_size = vlib_buffer_get_default_data_size (vm);
+ rxq_info->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
+
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+ rxq_info->ring_len = rxq->size;
+ rxq_info->rx_bufq1_id = rxq->bufq1->queue_index;
+ rxq_info->rx_bufq2_id = rxq->bufq2->queue_index;
+ rxq_info->rx_buffer_low_watermark = 64;
+
+ /* Buffer queue */
+ for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++)
+ {
+ idpf_rxq_t *bufq = (i == 1 ? rxq->bufq1 : rxq->bufq2);
+ rxq_info = &vc_rxqs->qinfo[i];
+ rxq_info->dma_ring_addr =
+ idpf_dma_addr (vm, id, (void *) bufq->descs);
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info->queue_id = bufq->queue_index;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info->data_buffer_size = vlib_buffer_get_default_data_size (vm);
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info->ring_len = bufq->size;
+
+ rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
+ rxq_info->rx_buffer_low_watermark = 64;
+ }
+ }
+
+ clib_memset (&args, 0, sizeof (args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (u8 *) vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ clib_mem_free (vc_rxqs);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return error;
+}
+
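+/* Request interrupt vectors from the PF and store the returned vector
+   chunks in vport->recv_vectors. */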
+clib_error_t *
+idpf_alloc_vectors (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ uint16_t num_vectors)
+{
+ virtchnl2_alloc_vectors_t *alloc_vec;
+ idpf_cmd_info_t args;
+ clib_error_t *error;
+ int len;
+
+ len = sizeof (virtchnl2_alloc_vectors_t) +
+ (num_vectors - 1) * sizeof (virtchnl2_vector_chunk_t);
+ alloc_vec = clib_mem_alloc_aligned (len, CLIB_CACHE_LINE_BYTES);
+ clib_memset (alloc_vec, 0, len);
+
+ alloc_vec->num_vectors = num_vectors;
+
+ args.ops = VIRTCHNL2_OP_ALLOC_VECTORS;
+ args.in_args = (u8 *) alloc_vec;
+ args.in_args_size = sizeof (virtchnl2_alloc_vectors_t);
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
+
+ if (vport->recv_vectors == NULL)
+ {
+ vport->recv_vectors =
+ clib_mem_alloc_aligned (len, CLIB_CACHE_LINE_BYTES);
+ clib_memset (vport->recv_vectors, 0, len);
+ }
+
+ clib_memcpy (vport->recv_vectors, args.out_buffer, len);
+ clib_mem_free (alloc_vec);
+ return error;
+}
+
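+/* Enable or disable a single queue of the given type via
+   VIRTCHNL2_OP_ENABLE/DISABLE_QUEUES. */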
+clib_error_t *
+idpf_vc_ena_dis_one_queue (vlib_main_t *vm, idpf_device_t *id,
+ idpf_vport_t *vport, u16 qid, u32 type, bool on)
+{
+ virtchnl2_del_ena_dis_queues_t *queue_select;
+ virtchnl2_queue_chunk_t *queue_chunk;
+ idpf_cmd_info_t args;
+ clib_error_t *error = 0;
+ int len;
+
+ len = sizeof (virtchnl2_del_ena_dis_queues_t);
+ queue_select = clib_mem_alloc_aligned (len, CLIB_CACHE_LINE_BYTES);
+ clib_memset (queue_select, 0, len);
+
+ queue_chunk = queue_select->chunks.chunks;
+ queue_select->chunks.num_chunks = 1;
+ queue_select->vport_id = vport->vport_id;
+
+ queue_chunk->type = type;
+ queue_chunk->start_queue_id = qid;
+ queue_chunk->num_queues = 1;
+
+ args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES : VIRTCHNL2_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *) queue_select;
+ args.in_args_size = len;
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+ on ? "ENABLE" : "DISABLE");
+
+ clib_mem_free (queue_select);
+ return error;
+}
+
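+/* Enable or disable a data queue and, in split queue mode, its associated
+   completion or buffer queues. */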
+clib_error_t *
+idpf_op_enable_queues (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ u16 qid, bool rx, bool on)
+{
+ clib_error_t *error;
+ u16 queue_index;
+ u32 type;
+
+  /* enable/disable the rx or tx data queue */
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ {
+ queue_index = vport->chunks_info.rx_start_qid + qid;
+ error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
+ }
+ else
+ {
+ queue_index = vport->chunks_info.tx_start_qid + qid;
+ error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
+ }
+ if (error != 0)
+ return error;
+
+ /* switch tx completion queue */
+ if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ {
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_index = vport->chunks_info.tx_compl_start_qid + qid;
+ error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
+ if (error != 0)
+ return error;
+ }
+
+ /* switch rx buffer queue */
+ if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ {
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_index = vport->chunks_info.rx_buf_start_qid + 2 * qid;
+ error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
+ if (error != 0)
+ return error;
+ queue_index++;
+ error = idpf_vc_ena_dis_one_queue (vm, id, vport, queue_index, type, on);
+ if (error != 0)
+ return error;
+ }
+
+ return error;
+}
+
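+/* Set up, configure and enable all rx/tx queues of a vport, then allocate
+   vectors and configure rx interrupts. */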
+clib_error_t *
+idpf_queue_init (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ idpf_create_if_args_t *args)
+{
+ clib_error_t *error = 0;
+ int i;
+
+ for (i = 0; i < id->n_rx_queues; i++)
+ {
+ if ((error = idpf_rx_queue_setup (vm, id, vport, i, args->rxq_size)))
+ return error;
+ if ((error = idpf_vc_config_rxq (vm, id, vport, i)))
+ return error;
+ if ((error = idpf_op_enable_queues (vm, id, vport, i, true, true)))
+ return error;
+ }
+
+ for (i = 0; i < id->n_tx_queues; i++)
+ {
+ if ((error = idpf_tx_queue_setup (vm, id, vport, i, args->txq_size)))
+ return error;
+ if ((error = idpf_vc_config_txq (vm, id, vport, i)))
+ return error;
+ if ((error = idpf_op_enable_queues (vm, id, vport, i, false, true)))
+ return error;
+ }
+
+ if ((error = idpf_alloc_vectors (vm, id, vport, IDPF_DFLT_Q_VEC_NUM)))
+ return error;
+
+ if ((error = idpf_config_rx_queues_irqs (vm, id, vport)))
+ return error;
+
+ return error;
+}
+
+clib_error_t *
+idpf_op_version (vlib_main_t *vm, idpf_device_t *id)
+{
+ clib_error_t *error = 0;
+ idpf_cmd_info_t args;
+ virtchnl2_version_info_t myver = {
+ .major = VIRTCHNL2_VERSION_MAJOR_2,
+ .minor = VIRTCHNL2_VERSION_MINOR_0,
+ };
+ virtchnl2_version_info_t ver = { 0 };
+
+ idpf_log_debug (id, "version: major %u minor %u", myver.major, myver.minor);
+
+ args.ops = VIRTCHNL2_OP_VERSION;
+ args.in_args = (u8 *) &myver;
+ args.in_args_size = sizeof (myver);
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ return clib_error_return (0,
+ "Failed to execute command VIRTCHNL_OP_VERSION");
+
+ clib_memcpy (&ver, args.out_buffer, sizeof (ver));
+
+ if (ver.major != VIRTCHNL2_VERSION_MAJOR_2 ||
+ ver.minor != VIRTCHNL2_VERSION_MINOR_0)
+ return clib_error_return (0,
+ "incompatible virtchnl version "
+ "(remote %d.%d)",
+ ver.major, ver.minor);
+
+ return 0;
+}
+
+clib_error_t *
+idpf_op_get_caps (vlib_main_t *vm, idpf_device_t *id,
+ virtchnl2_get_capabilities_t *caps)
+{
+ virtchnl2_get_capabilities_t caps_msg = { 0 };
+ idpf_cmd_info_t args;
+ clib_error_t *error = 0;
+
+ caps_msg.csum_caps =
+ VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP | VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP | VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP | VIRTCHNL2_CAP_TX_CSUM_GENERIC |
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 | VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP | VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP | VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP | VIRTCHNL2_CAP_RX_CSUM_GENERIC;
+
+ caps_msg.other_caps = VIRTCHNL2_CAP_WB_ON_ITR;
+
+ args.ops = VIRTCHNL2_OP_GET_CAPS;
+ args.in_args = (u8 *) &caps_msg;
+ args.in_args_size = sizeof (caps_msg);
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command VIRTCHNL2_OP_GET_CAPS");
+
+ clib_memcpy (caps, args.out_buffer, sizeof (*caps));
+ return error;
+}
+
+#define CTLQ_NUM 2
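+/* Create the mailbox tx/rx control queues and locate them in the control
+   queue list. */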
+clib_error_t *
+idpf_mbx_init (vlib_main_t *vm, idpf_device_t *id)
+{
+ idpf_ctlq_create_info_t ctlq_info[CTLQ_NUM] = {
+ {
+ .type = IDPF_CTLQ_TYPE_MAILBOX_TX,
+ .id = IDPF_CTLQ_ID,
+ .len = IDPF_CTLQ_LEN,
+ .buf_size = IDPF_DFLT_MBX_BUF_SIZE,
+ .reg = {
+ .head = PF_FW_ATQH,
+ .tail = PF_FW_ATQT,
+ .len = PF_FW_ATQLEN,
+ .bah = PF_FW_ATQBAH,
+ .bal = PF_FW_ATQBAL,
+ .len_mask = PF_FW_ATQLEN_ATQLEN_M,
+ .len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
+ .head_mask = PF_FW_ATQH_ATQH_M,
+ }
+ },
+ {
+ .type = IDPF_CTLQ_TYPE_MAILBOX_RX,
+ .id = IDPF_CTLQ_ID,
+ .len = IDPF_CTLQ_LEN,
+ .buf_size = IDPF_DFLT_MBX_BUF_SIZE,
+ .reg = {
+ .head = PF_FW_ARQH,
+ .tail = PF_FW_ARQT,
+ .len = PF_FW_ARQLEN,
+ .bah = PF_FW_ARQBAH,
+ .bal = PF_FW_ARQBAL,
+ .len_mask = PF_FW_ARQLEN_ARQLEN_M,
+ .len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
+ .head_mask = PF_FW_ARQH_ARQH_M,
+ }
+ }
+ };
+ struct idpf_ctlq_info *ctlq;
+
+ if (idpf_ctlq_init (vm, id, CTLQ_NUM, ctlq_info))
+ return clib_error_return (0, "ctlq init failed");
+
+ LIST_FOR_EACH_ENTRY_SAFE (ctlq, NULL, &id->cq_list_head,
+ struct idpf_ctlq_info, cq_list)
+ {
+ if (ctlq->q_id == IDPF_CTLQ_ID &&
+ ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
+ id->asq = ctlq;
+ if (ctlq->q_id == IDPF_CTLQ_ID &&
+ ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
+ id->arq = ctlq;
+ }
+
+  if (!id->asq || !id->arq)
+    {
+      idpf_ctlq_deinit (id);
+      return clib_error_return (0, "failed to find mailbox control queues");
+    }
+
+ return 0;
+}
+
+clib_error_t *
+idpf_vc_query_ptype_info (vlib_main_t *vm, idpf_device_t *id)
+{
+ virtchnl2_get_ptype_info_t ptype_info;
+ idpf_cmd_info_t args;
+ clib_error_t *error;
+
+ ptype_info.start_ptype_id = 0;
+ ptype_info.num_ptypes = IDPF_MAX_PKT_TYPE;
+ args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
+ args.in_args = (u8 *) &ptype_info;
+ args.in_args_size = sizeof (virtchnl2_get_ptype_info_t);
+ args.out_buffer = NULL;
+ args.out_size = 0;
+
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command VIRTCHNL2_OP_GET_PTYPE_INFO");
+
+ return error;
+}
+
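+/* Query the packet type table from the PF and translate each entry into the
+   driver's internal ptype flags. */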
+clib_error_t *
+idpf_get_pkt_type (vlib_main_t *vm, idpf_device_t *id)
+{
+ virtchnl2_get_ptype_info_t *ptype_info;
+ u16 ptype_recvd = 0, ptype_offset, i, j;
+ clib_error_t *error;
+
+ error = idpf_vc_query_ptype_info (vm, id);
+ if (error != 0)
+    return clib_error_return (0, "Failed to query packet type information");
+
+ ptype_info =
+ clib_mem_alloc_aligned (IDPF_DFLT_MBX_BUF_SIZE, CLIB_CACHE_LINE_BYTES);
+
+ while (ptype_recvd < IDPF_MAX_PKT_TYPE)
+ {
+ error = idpf_read_one_msg (vm, id, VIRTCHNL2_OP_GET_PTYPE_INFO,
+ (u8 *) ptype_info, IDPF_DFLT_MBX_BUF_SIZE);
+ if (error != 0)
+ {
+	  error = clib_error_return (0, "Failed to get packet type information");
+ goto free_ptype_info;
+ }
+
+ ptype_recvd += ptype_info->num_ptypes;
+ ptype_offset =
+ sizeof (virtchnl2_get_ptype_info_t) - sizeof (virtchnl2_ptype_t);
+
+ for (i = 0; i < ptype_info->num_ptypes; i++)
+ {
+ bool is_inner = false, is_ip = false;
+ virtchnl2_ptype_t *ptype;
+ u32 proto_hdr = 0;
+
+ ptype = (virtchnl2_ptype_t *) ((u8 *) ptype_info + ptype_offset);
+ ptype_offset += IDPF_GET_PTYPE_SIZE (ptype);
+ if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE)
+ {
+ error =
+ clib_error_return (0, "Ptype offset exceeds mbx buffer size");
+ goto free_ptype_info;
+ }
+
+ if (ptype->ptype_id_10 == 0xFFFF)
+ goto free_ptype_info;
+
+ for (j = 0; j < ptype->proto_id_count; j++)
+ {
+ switch (ptype->proto_id[j])
+ {
+ case VIRTCHNL2_PROTO_HDR_GRE:
+ case VIRTCHNL2_PROTO_HDR_VXLAN:
+ proto_hdr &= ~IDPF_PTYPE_L4_MASK;
+ proto_hdr |= IDPF_PTYPE_TUNNEL_GRENAT;
+ is_inner = true;
+ break;
+ case VIRTCHNL2_PROTO_HDR_MAC:
+ if (is_inner)
+ {
+ proto_hdr &= ~IDPF_PTYPE_INNER_L2_MASK;
+ proto_hdr |= IDPF_PTYPE_INNER_L2_ETHER;
+ }
+ else
+ {
+ proto_hdr &= ~IDPF_PTYPE_L2_MASK;
+ proto_hdr |= IDPF_PTYPE_L2_ETHER;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_VLAN:
+ if (is_inner)
+ {
+ proto_hdr &= ~IDPF_PTYPE_INNER_L2_MASK;
+ proto_hdr |= IDPF_PTYPE_INNER_L2_ETHER_VLAN;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_PTP:
+ proto_hdr &= ~IDPF_PTYPE_L2_MASK;
+ proto_hdr |= IDPF_PTYPE_L2_ETHER_TIMESYNC;
+ break;
+ case VIRTCHNL2_PROTO_HDR_LLDP:
+ proto_hdr &= ~IDPF_PTYPE_L2_MASK;
+ proto_hdr |= IDPF_PTYPE_L2_ETHER_LLDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ARP:
+ proto_hdr &= ~IDPF_PTYPE_L2_MASK;
+ proto_hdr |= IDPF_PTYPE_L2_ETHER_ARP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PPPOE:
+ proto_hdr &= ~IDPF_PTYPE_L2_MASK;
+ proto_hdr |= IDPF_PTYPE_L2_ETHER_PPPOE;
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4:
+ if (!is_ip)
+ {
+ proto_hdr |= IDPF_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ is_ip = true;
+ }
+ else
+ {
+ proto_hdr |= IDPF_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ IDPF_PTYPE_TUNNEL_IP;
+ is_inner = true;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6:
+ if (!is_ip)
+ {
+ proto_hdr |= IDPF_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ is_ip = true;
+ }
+ else
+ {
+ proto_hdr |= IDPF_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ IDPF_PTYPE_TUNNEL_IP;
+ is_inner = true;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+ case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+ if (is_inner)
+ proto_hdr |= IDPF_PTYPE_INNER_L4_FRAG;
+ else
+ proto_hdr |= IDPF_PTYPE_L4_FRAG;
+ break;
+ case VIRTCHNL2_PROTO_HDR_UDP:
+ if (is_inner)
+ proto_hdr |= IDPF_PTYPE_INNER_L4_UDP;
+ else
+ proto_hdr |= IDPF_PTYPE_L4_UDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_TCP:
+ if (is_inner)
+ proto_hdr |= IDPF_PTYPE_INNER_L4_TCP;
+ else
+ proto_hdr |= IDPF_PTYPE_L4_TCP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_SCTP:
+ if (is_inner)
+ proto_hdr |= IDPF_PTYPE_INNER_L4_SCTP;
+ else
+ proto_hdr |= IDPF_PTYPE_L4_SCTP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMP:
+ if (is_inner)
+ proto_hdr |= IDPF_PTYPE_INNER_L4_ICMP;
+ else
+ proto_hdr |= IDPF_PTYPE_L4_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMPV6:
+ if (is_inner)
+ proto_hdr |= IDPF_PTYPE_INNER_L4_ICMP;
+ else
+ proto_hdr |= IDPF_PTYPE_L4_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_L2TPV2:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+ case VIRTCHNL2_PROTO_HDR_L2TPV3:
+ is_inner = true;
+ proto_hdr |= IDPF_PTYPE_TUNNEL_L2TP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_NVGRE:
+ is_inner = true;
+ proto_hdr |= IDPF_PTYPE_TUNNEL_NVGRE;
+ break;
+ case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+ is_inner = true;
+ proto_hdr |= IDPF_PTYPE_TUNNEL_GTPC;
+ break;
+ case VIRTCHNL2_PROTO_HDR_GTPU:
+ case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+ case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+ is_inner = true;
+ proto_hdr |= IDPF_PTYPE_TUNNEL_GTPU;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PAY:
+ case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+ case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+ case VIRTCHNL2_PROTO_HDR_POST_MAC:
+ case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+ case VIRTCHNL2_PROTO_HDR_SVLAN:
+ case VIRTCHNL2_PROTO_HDR_CVLAN:
+ case VIRTCHNL2_PROTO_HDR_MPLS:
+ case VIRTCHNL2_PROTO_HDR_MMPLS:
+ case VIRTCHNL2_PROTO_HDR_CTRL:
+ case VIRTCHNL2_PROTO_HDR_ECP:
+ case VIRTCHNL2_PROTO_HDR_EAPOL:
+ case VIRTCHNL2_PROTO_HDR_PPPOD:
+ case VIRTCHNL2_PROTO_HDR_IGMP:
+ case VIRTCHNL2_PROTO_HDR_AH:
+ case VIRTCHNL2_PROTO_HDR_ESP:
+ case VIRTCHNL2_PROTO_HDR_IKE:
+ case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+ case VIRTCHNL2_PROTO_HDR_GTP:
+ case VIRTCHNL2_PROTO_HDR_GTP_EH:
+ case VIRTCHNL2_PROTO_HDR_GTPCV2:
+ case VIRTCHNL2_PROTO_HDR_ECPRI:
+ case VIRTCHNL2_PROTO_HDR_VRRP:
+ case VIRTCHNL2_PROTO_HDR_OSPF:
+ case VIRTCHNL2_PROTO_HDR_TUN:
+ case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+ case VIRTCHNL2_PROTO_HDR_GENEVE:
+ case VIRTCHNL2_PROTO_HDR_NSH:
+ case VIRTCHNL2_PROTO_HDR_QUIC:
+ case VIRTCHNL2_PROTO_HDR_PFCP:
+ case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+ case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+ case VIRTCHNL2_PROTO_HDR_RTP:
+ case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+ default:
+ continue;
+ }
+ id->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
+ }
+ }
+ }
+
+free_ptype_info:
+ clib_mem_free (ptype_info);
+ clear_cmd (id);
+ return error;
+}
+
+static void
+idpf_reset_pf (idpf_device_t *id)
+{
+ u32 reg;
+
+ reg = idpf_reg_read (id, PFGEN_CTRL);
+ idpf_reg_write (id, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
+}
+
+#define IDPF_RESET_WAIT_CNT 100
+clib_error_t *
+idpf_check_pf_reset_done (vlib_main_t *vm, idpf_device_t *id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < IDPF_RESET_WAIT_CNT; i++)
+ {
+ reg = idpf_reg_read (id, PFGEN_RSTAT);
+ if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
+ return 0;
+ vlib_process_suspend (vm, 1.0);
+ }
+
+ return clib_error_return (0, "pf reset time out");
+}
+
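+/* Fill the create-vport request with the requested queue model and default
+   queue counts. */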
+void
+idpf_init_vport_req_info (idpf_device_t *id,
+ virtchnl2_create_vport_t *vport_info)
+{
+ vport_info->vport_type = VIRTCHNL2_VPORT_TYPE_DEFAULT;
+ if (id->txq_model == 1)
+ {
+ vport_info->txq_model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
+ vport_info->num_tx_complq =
+ IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;
+ }
+ else
+ {
+ vport_info->txq_model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
+ vport_info->num_tx_complq = 0;
+ }
+ if (id->rxq_model == 1)
+ {
+ vport_info->rxq_model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
+ vport_info->num_rx_bufq = IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;
+ }
+ else
+ {
+ vport_info->rxq_model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
+ vport_info->num_rx_bufq = 0;
+ }
+
+ return;
+}
+
+clib_error_t *
+idpf_vc_create_vport (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ virtchnl2_create_vport_t *vport_req_info)
+{
+ virtchnl2_create_vport_t vport_msg = { 0 };
+ idpf_cmd_info_t args;
+ clib_error_t *error;
+
+ vport_msg.vport_type = vport_req_info->vport_type;
+ vport_msg.txq_model = vport_req_info->txq_model;
+ vport_msg.rxq_model = vport_req_info->rxq_model;
+ vport_msg.num_tx_q = vport_req_info->num_tx_q;
+ vport_msg.num_tx_complq = vport_req_info->num_tx_complq;
+ vport_msg.num_rx_q = vport_req_info->num_rx_q;
+ vport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;
+
+ clib_memset (&args, 0, sizeof (args));
+ args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+ args.in_args = (u8 *) &vport_msg;
+ args.in_args_size = sizeof (vport_msg);
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+
+ clib_memcpy (vport->vport_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return error;
+}
+
+clib_error_t *
+idpf_vc_destroy_vport (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport)
+{
+ virtchnl2_vport_t vc_vport;
+ idpf_cmd_info_t args;
+ clib_error_t *error = 0;
+
+ vc_vport.vport_id = vport->vport_id;
+
+ clib_memset (&args, 0, sizeof (args));
+ args.ops = VIRTCHNL2_OP_DESTROY_VPORT;
+ args.in_args = (u8 *) &vc_vport;
+ args.in_args_size = sizeof (vc_vport);
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
+
+ return error;
+}
+
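+/* Copy vport parameters and queue chunk (qid/tail register) information
+   returned by the PF into the local vport structure. */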
+clib_error_t *
+idpf_init_vport (idpf_device_t *id, idpf_vport_t *vport)
+{
+ virtchnl2_create_vport_t *vport_info = vport->vport_info;
+ int i, type;
+
+ vport->vport_id = vport_info->vport_id;
+ vport->txq_model = vport_info->txq_model;
+ vport->rxq_model = vport_info->rxq_model;
+ vport->num_tx_q = vport_info->num_tx_q;
+ vport->num_tx_complq = vport_info->num_tx_complq;
+ vport->num_rx_q = vport_info->num_rx_q;
+ vport->num_rx_bufq = vport_info->num_rx_bufq;
+ vport->max_mtu = vport_info->max_mtu;
+ clib_memcpy (vport->default_mac_addr, vport_info->default_mac_addr,
+ IDPF_ETH_ALEN);
+
+ for (i = 0; i < vport_info->chunks.num_chunks; i++)
+ {
+ type = vport_info->chunks.chunks[i].type;
+ switch (type)
+ {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ vport->chunks_info.tx_start_qid =
+ vport_info->chunks.chunks[i].start_queue_id;
+ vport->chunks_info.tx_qtail_start =
+ vport_info->chunks.chunks[i].qtail_reg_start;
+ vport->chunks_info.tx_qtail_spacing =
+ vport_info->chunks.chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ vport->chunks_info.rx_start_qid =
+ vport_info->chunks.chunks[i].start_queue_id;
+ vport->chunks_info.rx_qtail_start =
+ vport_info->chunks.chunks[i].qtail_reg_start;
+ vport->chunks_info.rx_qtail_spacing =
+ vport_info->chunks.chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ vport->chunks_info.tx_compl_start_qid =
+ vport_info->chunks.chunks[i].start_queue_id;
+ vport->chunks_info.tx_compl_qtail_start =
+ vport_info->chunks.chunks[i].qtail_reg_start;
+ vport->chunks_info.tx_compl_qtail_spacing =
+ vport_info->chunks.chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ vport->chunks_info.rx_buf_start_qid =
+ vport_info->chunks.chunks[i].start_queue_id;
+ vport->chunks_info.rx_buf_qtail_start =
+ vport_info->chunks.chunks[i].qtail_reg_start;
+ vport->chunks_info.rx_buf_qtail_spacing =
+ vport_info->chunks.chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ return clib_error_return (0, "Unsupported queue type");
+ }
+ }
+
+ return 0;
+}
+
+clib_error_t *
+idpf_ena_dis_vport (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport,
+ bool enable)
+{
+ virtchnl2_vport_t vc_vport;
+ idpf_cmd_info_t args;
+ clib_error_t *error;
+
+ vc_vport.vport_id = vport->vport_id;
+ args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT : VIRTCHNL2_OP_DISABLE_VPORT;
+ args.in_args = (u8 *) &vc_vport;
+ args.in_args_size = sizeof (vc_vport);
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ {
+ return clib_error_return (
+ 0, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
+ enable ? "ENABLE" : "DISABLE");
+ }
+
+ return error;
+}
+
+clib_error_t *
+idpf_dealloc_vectors (vlib_main_t *vm, idpf_device_t *id, idpf_vport_t *vport)
+{
+ virtchnl2_alloc_vectors_t *alloc_vec;
+ virtchnl2_vector_chunks_t *vcs;
+ idpf_cmd_info_t args;
+ clib_error_t *error;
+ int len;
+
+ alloc_vec = vport->recv_vectors;
+ vcs = &alloc_vec->vchunks;
+
+ len = sizeof (virtchnl2_vector_chunks_t) +
+ (vcs->num_vchunks - 1) * sizeof (virtchnl2_vector_chunk_t);
+
+ args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;
+ args.in_args = (u8 *) vcs;
+ args.in_args_size = len;
+ args.out_buffer = id->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ error = idpf_execute_vc_cmd (vm, id, &args);
+ if (error != 0)
+ return clib_error_return (
+ 0, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");
+
+ return error;
+}
+
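+/* Allocate a vport, create it on the device and initialize it from the PF
+   response. */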
+clib_error_t *
+idpf_dev_vport_init (vlib_main_t *vm, idpf_device_t *id,
+ idpf_vport_param_t *param)
+{
+ idpf_vport_t *vport;
+ virtchnl2_create_vport_t vport_req_info = { 0 };
+ clib_error_t *error = 0;
+
+ vport = clib_mem_alloc (sizeof (idpf_vport_t));
+ clib_memset (vport, 0, sizeof (idpf_vport_t));
+
+ vport->vport_info = clib_mem_alloc (IDPF_DFLT_MBX_BUF_SIZE);
+ clib_memset (vport->vport_info, 0, IDPF_DFLT_MBX_BUF_SIZE);
+
+ id->vports[param->idx] = vport;
+ vport->id = id;
+ vport->idx = param->idx;
+
+ idpf_init_vport_req_info (id, &vport_req_info);
+
+ error = idpf_vc_create_vport (vm, id, vport, &vport_req_info);
+ if (error != 0)
+ {
+ idpf_log_err (id, "Failed to create vport.");
+ goto err_create_vport;
+ }
+
+ error = idpf_init_vport (id, vport);
+ if (error != 0)
+ {
+      idpf_log_err (id, "Failed to init vport.");
+ goto err_init_vport;
+ }
+
+ id->vports[param->idx] = vport;
+
+ clib_memcpy (id->hwaddr, vport->default_mac_addr, IDPF_ETH_ALEN);
+
+ return error;
+
+err_init_vport:
+ id->vports[param->idx] = NULL; /* reset */
+ idpf_vc_destroy_vport (vm, id, vport);
+err_create_vport:
+ clib_mem_free (vport->vport_info);
+ clib_mem_free (vport);
+ return error;
+}
+
+/* Device initialization: reset the PF, set up the mailbox, negotiate
+   capabilities, then create and enable the requested vports and queues. */
+clib_error_t *
+idpf_device_init (vlib_main_t *vm, idpf_main_t *im, idpf_device_t *id,
+ idpf_create_if_args_t *args)
+{
+ idpf_vport_t *vport;
+ idpf_vport_param_t vport_param = { 0 };
+ virtchnl2_get_capabilities_t caps = { 0 };
+ clib_error_t *error;
+ u16 rxq_num, txq_num;
+ int i;
+
+ idpf_reset_pf (id);
+ error = idpf_check_pf_reset_done (vm, id);
+ if (error)
+ return error;
+
+ /*
+ * Init mailbox configuration
+ */
+ if ((error = idpf_mbx_init (vm, id)))
+ return error;
+
+ /*
+ * Check API version
+ */
+ error = idpf_op_version (vm, id);
+ if (error)
+ return error;
+
+ /*
+ * Get pkt type table
+ */
+ error = idpf_get_pkt_type (vm, id);
+ if (error)
+ return error;
+
+ /* Get idpf capability */
+ error = idpf_op_get_caps (vm, id, &caps);
+ if (error)
+ return error;
+
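+  /* defaults: one rx queue, and one tx queue per thread */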
+ rxq_num = args->rxq_num ? args->rxq_num : 1;
+ txq_num = args->txq_num ? args->txq_num : vlib_get_n_threads ();
+
+ /* Sync capabilities */
+ id->n_rx_queues = rxq_num;
+ id->n_tx_queues = txq_num;
+ id->csum_caps = caps.csum_caps;
+ id->seg_caps = caps.seg_caps;
+ id->hsplit_caps = caps.hsplit_caps;
+ id->rsc_caps = caps.rsc_caps;
+ id->rss_caps = caps.rss_caps;
+ id->other_caps = caps.other_caps;
+ id->max_rx_q = caps.max_rx_q;
+ id->max_tx_q = caps.max_tx_q;
+ id->max_rx_bufq = caps.max_rx_bufq;
+ id->max_tx_complq = caps.max_tx_complq;
+ id->max_sriov_vfs = caps.max_sriov_vfs;
+ id->max_vports = caps.max_vports;
+ id->default_num_vports = caps.default_num_vports;
+
+ id->vports = clib_mem_alloc (id->max_vports * sizeof (*id->vports));
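+  /* number of queue-config entries that fit in a single mailbox message */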
+ id->max_rxq_per_msg =
+ (IDPF_DFLT_MBX_BUF_SIZE - sizeof (virtchnl2_config_rx_queues_t)) /
+ sizeof (virtchnl2_rxq_info_t);
+ id->max_txq_per_msg =
+ (IDPF_DFLT_MBX_BUF_SIZE - sizeof (virtchnl2_config_tx_queues_t)) /
+ sizeof (virtchnl2_txq_info_t);
+
+ id->cur_vport_idx = 0;
+ id->cur_vports = 0;
+ id->cur_vport_nb = 0;
+
+ if (!args->rxq_single)
+ id->rxq_model = 1;
+ if (!args->txq_single)
+ id->txq_model = 1;
+
+ /* Init and enable vports */
+ if (args->req_vport_nb == 1)
+ {
+ vport_param.id = id;
+ vport_param.idx = 0;
+ error = idpf_dev_vport_init (vm, id, &vport_param);
+ if (error)
+ return error;
+ vport = id->vports[vport_param.idx];
+ error = idpf_ena_dis_vport (vm, id, vport, true);
+ if (error)
+ return error;
+ id->cur_vports |= 1ULL << vport_param.idx;
+ id->cur_vport_nb++;
+ id->cur_vport_idx++;
+ error = idpf_queue_init (vm, id, vport, args);
+ if (error)
+ return error;
+ }
+ else
+ {
+ for (i = 0; i < args->req_vport_nb; i++)
+ {
+ vport_param.id = id;
+ vport_param.idx = i;
+ if ((error = idpf_dev_vport_init (vm, id, &vport_param)))
+ return error;
+ vport = id->vports[vport_param.idx];
+ error = idpf_ena_dis_vport (vm, id, vport, true);
+ if (error)
+ return error;
+ id->cur_vports |= 1ULL << vport_param.idx;
+ id->cur_vport_nb++;
+ id->cur_vport_idx++;
+ error = idpf_queue_init (vm, id, vport, args);
+ if (error)
+ return error;
+ }
+ }
+
+ id->flags |= IDPF_DEVICE_F_INITIALIZED;
+ return error;
+}
+
+static u32
+idpf_flag_change (vnet_main_t *vnm, vnet_hw_interface_t *hw, u32 flags)
+{
+ idpf_device_t *id = idpf_get_device (hw->dev_instance);
+
+ switch (flags)
+ {
+ case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
+ id->flags &= ~IDPF_DEVICE_F_PROMISC;
+ break;
+ case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
+ id->flags |= IDPF_DEVICE_F_PROMISC;
+ break;
+ default:
+ return ~0;
+ }
+
+ return 0;
+}
+
+void
+idpf_delete_if (vlib_main_t *vm, idpf_device_t *id, int with_barrier)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ idpf_main_t *im = &idpf_main;
+ idpf_vport_t *vport;
+  int i, j;
+ u32 dev_instance;
+
+ id->flags &= ~IDPF_DEVICE_F_ADMIN_UP;
+
+ if (id->hw_if_index)
+ {
+ if (with_barrier)
+ vlib_worker_thread_barrier_sync (vm);
+ vnet_hw_interface_set_flags (vnm, id->hw_if_index, 0);
+ ethernet_delete_interface (vnm, id->hw_if_index);
+ if (with_barrier)
+ vlib_worker_thread_barrier_release (vm);
+ }
+
+ for (i = 0; i < id->cur_vport_nb; i++)
+ {
+ vport = id->vports[i];
+ if (vport->recv_vectors != NULL)
+ idpf_dealloc_vectors (vm, id, vport);
+ }
+
+ vlib_pci_device_close (vm, id->pci_dev_handle);
+
+ vlib_physmem_free (vm, id->asq);
+ vlib_physmem_free (vm, id->arq);
+
+ for (i = 0; i < id->cur_vport_nb; i++)
+ {
+ vport = id->vports[i];
+      vec_foreach_index (j, vport->rxqs)
+	{
+	  idpf_rxq_t *rxq = vec_elt_at_index (vport->rxqs, j);
+ vlib_physmem_free (vm, (void *) rxq->descs);
+ if (rxq->n_enqueued)
+ vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
+ rxq->n_enqueued);
+ vec_free (rxq->bufs);
+ }
+
+ vec_free (vport->rxqs);
+
+      vec_foreach_index (j, vport->txqs)
+	{
+	  idpf_txq_t *txq = vec_elt_at_index (vport->txqs, j);
+ vlib_physmem_free (vm, (void *) txq->descs);
+ if (txq->n_enqueued)
+ {
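+	      /* oldest in-flight slot; queue size is a power of two */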
+ u16 first = (txq->next - txq->n_enqueued) & (txq->size - 1);
+ vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
+ txq->n_enqueued);
+ }
+ vec_free (txq->ph_bufs);
+ vec_free (txq->bufs);
+ clib_ring_free (txq->rs_slots);
+ vec_free (txq->tmp_bufs);
+ vec_free (txq->tmp_descs);
+ clib_spinlock_free (&txq->lock);
+ }
+ vec_free (vport->txqs);
+ }
+
+ vec_free (id->name);
+
+ clib_error_free (id->error);
+ dev_instance = id->dev_instance;
+ clib_mem_free (id->mbx_resp);
+ clib_memset (id, 0, sizeof (*id));
+ pool_put_index (im->devices, dev_instance);
+ clib_mem_free (id);
+}
+
+static u8
+idpf_validate_queue_size (idpf_create_if_args_t *args)
+{
+ clib_error_t *error = 0;
+
+ args->rxq_size = (args->rxq_size == 0) ? IDPF_RXQ_SZ : args->rxq_size;
+ args->txq_size = (args->txq_size == 0) ? IDPF_TXQ_SZ : args->txq_size;
+
+ if ((args->rxq_size > IDPF_QUEUE_SZ_MAX) ||
+ (args->txq_size > IDPF_QUEUE_SZ_MAX))
+ {
+ args->rv = VNET_API_ERROR_INVALID_VALUE;
+ args->error = clib_error_return (
+ error, "queue size must not be greater than %u", IDPF_QUEUE_SZ_MAX);
+ return 1;
+ }
+ if ((args->rxq_size < IDPF_QUEUE_SZ_MIN) ||
+ (args->txq_size < IDPF_QUEUE_SZ_MIN))
+ {
+ args->rv = VNET_API_ERROR_INVALID_VALUE;
+ args->error = clib_error_return (
+ error, "queue size must not be smaller than %u", IDPF_QUEUE_SZ_MIN);
+ return 1;
+ }
+ if ((args->rxq_size & (args->rxq_size - 1)) ||
+ (args->txq_size & (args->txq_size - 1)))
+ {
+ args->rv = VNET_API_ERROR_INVALID_VALUE;
+ args->error =
+ clib_error_return (error, "queue size must be a power of two");
+ return 1;
+ }
+ return 0;
+}
+
+void
+idpf_process_one_device (vlib_main_t *vm, idpf_device_t *id, int is_irq)
+{
+ /* placeholder */
+ return;
+}
+
+static uword
+idpf_process (vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
+{
+ idpf_main_t *im = &idpf_main;
+ uword *event_data = 0, event_type;
+ int enabled = 0, irq;
+ f64 last_run_duration = 0;
+ f64 last_periodic_time = 0;
+ idpf_device_t **dev_pointers = 0;
+ u32 i;
+
+ while (1)
+ {
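+      /* once enabled, poll all devices roughly every 5 seconds */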
+ if (enabled)
+ vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
+ else
+ vlib_process_wait_for_event (vm);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+ irq = 0;
+
+ switch (event_type)
+ {
+ case ~0:
+ last_periodic_time = vlib_time_now (vm);
+ break;
+ case IDPF_PROCESS_EVENT_START:
+ enabled = 1;
+ break;
+ case IDPF_PROCESS_EVENT_DELETE_IF:
+ for (int i = 0; i < vec_len (event_data); i++)
+ {
+ idpf_device_t *id = idpf_get_device (event_data[i]);
+ idpf_delete_if (vm, id, /* with_barrier */ 1);
+ }
+ if (pool_elts (im->devices) < 1)
+ enabled = 0;
+ break;
+ case IDPF_PROCESS_EVENT_AQ_INT:
+ irq = 1;
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ vec_reset_length (event_data);
+
+ if (enabled == 0)
+ continue;
+
+ /* create local list of device pointers as device pool may grow
+ * during suspend */
+ vec_reset_length (dev_pointers);
+
+ pool_foreach_index (i, im->devices)
+ {
+ vec_add1 (dev_pointers, idpf_get_device (i));
+ }
+
+ vec_foreach_index (i, dev_pointers)
+ {
+ idpf_process_one_device (vm, dev_pointers[i], irq);
+    }
+
+ last_run_duration = vlib_time_now (vm) - last_periodic_time;
+ }
+ return 0;
+}
+
+VLIB_REGISTER_NODE (idpf_process_node) = {
+ .function = idpf_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "idpf-process",
+};
+
+void
+idpf_create_if (vlib_main_t *vm, idpf_create_if_args_t *args)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_eth_interface_registration_t eir = {};
+ idpf_main_t *im = &idpf_main;
+ idpf_device_t *id, **idp;
+ vlib_pci_dev_handle_t h;
+ clib_error_t *error = 0;
+ int i, j, v;
+
+ /* check input args */
+ if (idpf_validate_queue_size (args) != 0)
+ return;
+
+ pool_foreach (idp, im->devices)
+ {
+ if ((*idp)->pci_addr.as_u32 == args->addr.as_u32)
+ {
+ args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
+ args->error =
+ clib_error_return (error, "%U: %s", format_vlib_pci_addr,
+ &args->addr, "pci address in use");
+ return;
+ }
+ }
+
+ pool_get (im->devices, idp);
+ idp[0] = id =
+ clib_mem_alloc_aligned (sizeof (idpf_device_t), CLIB_CACHE_LINE_BYTES);
+ clib_memset (id, 0, sizeof (idpf_device_t));
+ id->mbx_resp = clib_mem_alloc (IDPF_DFLT_MBX_BUF_SIZE);
+ id->dev_instance = idp - im->devices;
+ id->per_interface_next_index = ~0;
+ id->name = vec_dup (args->name);
+
+ if ((error =
+ vlib_pci_device_open (vm, &args->addr, idpf_pci_device_ids, &h)))
+ {
+ pool_put (im->devices, idp);
+ clib_mem_free (id);
+ args->rv = VNET_API_ERROR_INVALID_INTERFACE;
+ args->error = clib_error_return (error, "pci-addr %U",
+ format_vlib_pci_addr, &args->addr);
+ return;
+ }
+ id->pci_dev_handle = h;
+ id->pci_addr = args->addr;
+ id->numa_node = vlib_pci_get_numa_node (vm, h);
+
+ vlib_pci_set_private_data (vm, h, id->dev_instance);
+
+ if ((error = vlib_pci_bus_master_enable (vm, h)))
+ goto error;
+
+ if ((error = vlib_pci_map_region (vm, h, 0, &id->bar0)))
+ goto error;
+
+ if (vlib_pci_supports_virtual_addr_dma (vm, h))
+ id->flags |= IDPF_DEVICE_F_VA_DMA;
+
+ if ((error = idpf_device_init (vm, im, id, args)))
+ goto error;
+
+ /* create interface */
+ eir.dev_class_index = idpf_device_class.index;
+ eir.dev_instance = id->dev_instance;
+ eir.address = id->hwaddr;
+ eir.cb.flag_change = idpf_flag_change;
+ id->hw_if_index = vnet_eth_register_interface (vnm, &eir);
+
+ ethernet_set_flags (vnm, id->hw_if_index,
+ ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
+
+ vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, id->hw_if_index);
+ args->sw_if_index = id->sw_if_index = sw->sw_if_index;
+
+ vnet_hw_if_set_caps (vnm, id->hw_if_index,
+ VNET_HW_IF_CAP_INT_MODE | VNET_HW_IF_CAP_MAC_FILTER |
+ VNET_HW_IF_CAP_TX_CKSUM | VNET_HW_IF_CAP_TCP_GSO);
+
+ for (v = 0; v < id->cur_vport_nb; v++)
+ {
+ for (j = 0; j < id->n_rx_queues; j++)
+ {
+ u32 qi;
+ i = v * id->n_rx_queues + j;
+ qi = vnet_hw_if_register_rx_queue (vnm, id->hw_if_index, i,
+ VNET_HW_IF_RXQ_THREAD_ANY);
+ id->vports[v]->rxqs[j].queue_index = qi;
+ }
+ for (j = 0; j < id->n_tx_queues; j++)
+ {
+ u32 qi;
+ i = v * id->n_tx_queues + j;
+ qi = vnet_hw_if_register_tx_queue (vnm, id->hw_if_index, i);
+ id->vports[v]->txqs[j].queue_index = qi;
+ }
+ }
+
+ for (v = 0; v < id->cur_vport_nb; v++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
+ {
+ u32 qi = id->vports[v]->txqs[i % id->n_tx_queues].queue_index;
+ vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
+ }
+
+ vnet_hw_if_update_runtime_data (vnm, id->hw_if_index);
+
+ if (pool_elts (im->devices) == 1)
+ vlib_process_signal_event (vm, idpf_process_node.index,
+ IDPF_PROCESS_EVENT_START, 0);
+
+ return;
+
+error:
+  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
+  args->error = clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
+				   &args->addr);
+  idpf_log_err (id, "error: %U", format_clib_error, args->error);
+  idpf_delete_if (vm, id, /* with_barrier */ 0);
+}
+
+void *
+idpf_alloc_dma_mem (vlib_main_t *vm, idpf_device_t *id, idpf_dma_mem_t *mem,
+ u64 size)
+{
+ void *mz = NULL;
+ vlib_pci_dev_handle_t h = id->pci_dev_handle;
+
+ if (!mem)
+ return NULL;
+
+ /* Fixme */
+ mz = vlib_physmem_alloc_aligned_on_numa (vm, size, CLIB_CACHE_LINE_BYTES,
+ id->numa_node);
+ if (!mz)
+ return NULL;
+ if (vlib_pci_map_dma (vm, h, mz))
+ return NULL;
+
+ mem->size = size;
+ if (id->flags & IDPF_DEVICE_F_VA_DMA)
+ {
+ mem->va = mz;
+ clib_memset (mem->va, 0, size);
+ }
+ else
+ {
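+      /* without VA DMA support, only the physical address is tracked */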
+ mem->va = NULL;
+ }
+ mem->pa = idpf_dma_addr (vm, id, mz);
+
+ return mem->va;
+}
+
+void
+idpf_free_dma_mem (idpf_device_t *id, idpf_dma_mem_t *mem)
+{
+  /* only reset the tracking fields; callers own the idpf_dma_mem_t wrapper
+   * and free it themselves where needed */
+  mem->size = 0;
+  mem->va = NULL;
+  mem->pa = 0;
+}
+
+static clib_error_t *
+idpf_interface_admin_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
+{
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ idpf_device_t *id = idpf_get_device (hi->dev_instance);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+
+ if (id->flags & IDPF_DEVICE_F_ERROR)
+ return clib_error_return (0, "device is in error state");
+
+ if (is_up)
+ {
+ vnet_hw_interface_set_flags (vnm, id->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ id->flags |= IDPF_DEVICE_F_ADMIN_UP;
+ }
+ else
+ {
+ vnet_hw_interface_set_flags (vnm, id->hw_if_index, 0);
+ id->flags &= ~IDPF_DEVICE_F_ADMIN_UP;
+ }
+ return 0;
+}
+
+VNET_DEVICE_CLASS (idpf_device_class, ) = {
+ .name = "Infrastructure Data Path Function (IDPF) interface",
+ .format_device_name = format_idpf_device_name,
+ .admin_up_down_function = idpf_interface_admin_up_down,
+};
+
+clib_error_t *
+idpf_init (vlib_main_t *vm)
+{
+ idpf_main_t *im = &idpf_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ vec_validate_aligned (im->per_thread_data, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (idpf_init) = {
+ .runs_after = VLIB_INITS ("pci_bus_init"),
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/idpf/format.c b/src/plugins/idpf/format.c
new file mode 100644
index 00000000000..86a4b884286
--- /dev/null
+++ b/src/plugins/idpf/format.c
@@ -0,0 +1,77 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <idpf/idpf.h>
+
+u8 *
+format_idpf_device_name (u8 *s, va_list *args)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u32 i = va_arg (*args, u32);
+ idpf_device_t *id = idpf_get_device (i);
+ vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, id->pci_dev_handle);
+
+ if (id->name)
+ return format (s, "%s", id->name);
+
+ s = format (s, "idpf-%x/%x/%x/%x", addr->domain, addr->bus, addr->slot,
+ addr->function);
+ return s;
+}
+
+u8 *
+format_idpf_device_flags (u8 *s, va_list *args)
+{
+ idpf_device_t *id = va_arg (*args, idpf_device_t *);
+ u8 *t = 0;
+
+#define _(a, b, c) \
+ if (id->flags & (1 << a)) \
+ t = format (t, "%s%s", t ? " " : "", c);
+ foreach_idpf_device_flags
+#undef _
+ s = format (s, "%v", t);
+ vec_free (t);
+ return s;
+}
+
+u8 *
+format_idpf_checksum_cap_flags (u8 *s, va_list *args)
+{
+ u32 flags = va_arg (*args, u32);
+ int not_first = 0;
+
+ char *strs[32] = {
+#define _(a, b, c) [a] = c,
+ foreach_idpf_checksum_cap_flag
+#undef _
+ };
+
+ for (int i = 0; i < 32; i++)
+ {
+ if ((flags & (1 << i)) == 0)
+ continue;
+ if (not_first)
+ s = format (s, " ");
+ if (strs[i])
+ s = format (s, "%s", strs[i]);
+ else
+ s = format (s, "unknown(%u)", i);
+ not_first = 1;
+ }
+ return s;
+}
diff --git a/src/plugins/idpf/idpf.api b/src/plugins/idpf/idpf.api
new file mode 100644
index 00000000000..5d02957ac38
--- /dev/null
+++ b/src/plugins/idpf/idpf.api
@@ -0,0 +1,80 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+option version = "1.0.0";
+import "vnet/interface_types.api";
+
+/** \brief
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param pci_addr - pci address as unsigned 32bit integer:
+ 0-15 domain, 16-23 bus, 24-28 slot, 29-31 function
+ ddddddddddddddddbbbbbbbbsssssfff
+ @param rxq_single - use single (non-split) rx queue model when non-zero
+ @param txq_single - use single (non-split) tx queue model when non-zero
+ @param rxq_num - number of receive queues
+ @param txq_num - number of transmit queues
+ @param rxq_size - receive queue size
+ @param txq_size - transmit queue size
+ @param req_vport_nb - number of vports to create
+*/
+
+define idpf_create
+{
+ u32 client_index;
+ u32 context;
+
+ u32 pci_addr;
+ u16 rxq_single;
+ u16 txq_single;
+ u16 rxq_num;
+ u16 txq_num;
+ u16 rxq_size;
+ u16 txq_size;
+ u16 req_vport_nb;
+ option vat_help = "<pci-address> [vport-num <size>] [rx-single <size>] [tx-single <size>] [rxq-num <size>] [txq-num <size>] [rxq-size <size>] [txq-size <size>]";
+};
+
+/** \brief
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+ @param sw_if_index - software index for the new idpf interface
+*/
+
+define idpf_create_reply
+{
+ u32 context;
+ i32 retval;
+ vl_api_interface_index_t sw_if_index;
+};
+
+/** \brief
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface index
+*/
+
+autoreply define idpf_delete
+{
+ u32 client_index;
+ u32 context;
+
+ vl_api_interface_index_t sw_if_index;
+ option vat_help = "<sw_if_index>";
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/idpf/idpf.h b/src/plugins/idpf/idpf.h
new file mode 100644
index 00000000000..0bac575d4b4
--- /dev/null
+++ b/src/plugins/idpf/idpf.h
@@ -0,0 +1,929 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef _IDPF_H_
+#define _IDPF_H_
+
+#include <vlib/vlib.h>
+#include <vppinfra/ring.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface/rx_queue_funcs.h>
+#include <vnet/interface/tx_queue_funcs.h>
+
+#include <vppinfra/types.h>
+#include <vppinfra/error_bootstrap.h>
+#include <vppinfra/lock.h>
+
+#include <vlib/log.h>
+#include <vlib/pci/pci.h>
+
+#include <vnet/interface.h>
+
+#include <vnet/devices/devices.h>
+#include <vnet/flow/flow.h>
+
+#include <idpf/virtchnl2.h>
+#include <sys/queue.h>
+
+#define BIT(a) (1UL << (a))
+
+/*
+ * LAN PF register
+ */
+#define MAKEMASK(m, s) ((m) << (s))
+
+/* Receive queues */
+#define PF_QRX_BASE 0x00000000
+#define PF_QRX_TAIL(_QRX) (PF_QRX_BASE + (((_QRX) *0x1000)))
+#define PF_QRX_BUFFQ_BASE 0x03000000
+#define PF_QRX_BUFFQ_TAIL(_QRX) (PF_QRX_BUFFQ_BASE + (((_QRX) *0x1000)))
+
+/* Transmit queues */
+#define PF_QTX_BASE 0x05000000
+#define PF_QTX_COMM_DBELL(_DBQM) (PF_QTX_BASE + ((_DBQM) *0x1000))
+
+/* Control(PF Mailbox) Queue */
+#define PF_FW_BASE 0x08400000
+
+#define PF_FW_ARQBAL (PF_FW_BASE)
+#define PF_FW_ARQBAH (PF_FW_BASE + 0x4)
+#define PF_FW_ARQLEN (PF_FW_BASE + 0x8)
+#define PF_FW_ARQLEN_ARQLEN_S 0
+#define PF_FW_ARQLEN_ARQLEN_M MAKEMASK (0x1FFF, PF_FW_ARQLEN_ARQLEN_S)
+#define PF_FW_ARQLEN_ARQVFE_S 28
+#define PF_FW_ARQLEN_ARQVFE_M BIT (PF_FW_ARQLEN_ARQVFE_S)
+#define PF_FW_ARQLEN_ARQOVFL_S 29
+#define PF_FW_ARQLEN_ARQOVFL_M BIT (PF_FW_ARQLEN_ARQOVFL_S)
+#define PF_FW_ARQLEN_ARQCRIT_S 30
+#define PF_FW_ARQLEN_ARQCRIT_M BIT (PF_FW_ARQLEN_ARQCRIT_S)
+#define PF_FW_ARQLEN_ARQENABLE_S 31
+#define PF_FW_ARQLEN_ARQENABLE_M BIT (PF_FW_ARQLEN_ARQENABLE_S)
+#define PF_FW_ARQH (PF_FW_BASE + 0xC)
+#define PF_FW_ARQH_ARQH_S 0
+#define PF_FW_ARQH_ARQH_M MAKEMASK (0x1FFF, PF_FW_ARQH_ARQH_S)
+#define PF_FW_ARQT (PF_FW_BASE + 0x10)
+
+#define PF_FW_ATQBAL (PF_FW_BASE + 0x14)
+#define PF_FW_ATQBAH (PF_FW_BASE + 0x18)
+#define PF_FW_ATQLEN (PF_FW_BASE + 0x1C)
+#define PF_FW_ATQLEN_ATQLEN_S 0
+#define PF_FW_ATQLEN_ATQLEN_M MAKEMASK (0x3FF, PF_FW_ATQLEN_ATQLEN_S)
+#define PF_FW_ATQLEN_ATQVFE_S 28
+#define PF_FW_ATQLEN_ATQVFE_M BIT (PF_FW_ATQLEN_ATQVFE_S)
+#define PF_FW_ATQLEN_ATQOVFL_S 29
+#define PF_FW_ATQLEN_ATQOVFL_M BIT (PF_FW_ATQLEN_ATQOVFL_S)
+#define PF_FW_ATQLEN_ATQCRIT_S 30
+#define PF_FW_ATQLEN_ATQCRIT_M BIT (PF_FW_ATQLEN_ATQCRIT_S)
+#define PF_FW_ATQLEN_ATQENABLE_S 31
+#define PF_FW_ATQLEN_ATQENABLE_M BIT (PF_FW_ATQLEN_ATQENABLE_S)
+#define PF_FW_ATQH (PF_FW_BASE + 0x20)
+#define PF_FW_ATQH_ATQH_S 0
+#define PF_FW_ATQH_ATQH_M MAKEMASK (0x3FF, PF_FW_ATQH_ATQH_S)
+#define PF_FW_ATQT (PF_FW_BASE + 0x24)
+
+/* Interrupts */
+#define PF_GLINT_BASE 0x08900000
+#define PF_GLINT_DYN_CTL_ITR_INDX_S 3
+#define PF_GLINT_DYN_CTL_ITR_INDX_M MAKEMASK (0x3, PF_GLINT_DYN_CTL_ITR_INDX_S)
+#define PF_GLINT_DYN_CTL_INTERVAL_S 5
+#define PF_GLINT_DYN_CTL_INTERVAL_M BIT (PF_GLINT_DYN_CTL_INTERVAL_S)
+#define PF_GLINT_DYN_CTL_WB_ON_ITR_S 30
+#define PF_GLINT_DYN_CTL_WB_ON_ITR_M BIT (PF_GLINT_DYN_CTL_WB_ON_ITR_S)
+
+/* Generic registers */
+#define PFGEN_RSTAT 0x08407008 /* PFR Status */
+#define PFGEN_RSTAT_PFR_STATE_S 0
+#define PFGEN_RSTAT_PFR_STATE_M MAKEMASK (0x3, PFGEN_RSTAT_PFR_STATE_S)
+#define PFGEN_CTRL 0x0840700C
+#define PFGEN_CTRL_PFSWR BIT (0)
+
+#define IDPF_CTLQ_ID -1
+#define IDPF_CTLQ_LEN 64
+#define IDPF_DFLT_MBX_BUF_SIZE 4096
+
+#define IDPF_MAX_NUM_QUEUES 256
+#define IDPF_MIN_BUF_SIZE 1024
+#define IDPF_MAX_FRAME_SIZE 9728
+#define IDPF_MAX_PKT_TYPE 1024
+#define IDPF_QUEUE_SZ_MAX 4096
+#define IDPF_QUEUE_SZ_MIN 64
+
+#define IDPF_RESET_SUSPEND_TIME 20e-3
+#define IDPF_RESET_MAX_WAIT_TIME 1
+
+#define IDPF_SEND_TO_PF_SUSPEND_TIME 10e-3
+#define IDPF_SEND_TO_PF_MAX_WAIT_TIME 1
+#define IDPF_SEND_TO_PF_MAX_TRY_TIMES 200
+
+#define IDPF_RX_MAX_DESC_IN_CHAIN 5
+
+#define IDPF_MAX_VPORT_NUM 8
+#define IDPF_DFLT_Q_VEC_NUM 1
+#define IDPF_DFLT_INTERVAL 16
+
+#define IDPF_DEFAULT_RXQ_NUM 16
+#define IDPF_DEFAULT_TXQ_NUM 16
+
+#define IDPF_ETH_ALEN 6
+
+#define IDPF_INVALID_VPORT_IDX 0xffff
+#define IDPF_TXQ_PER_GRP 1
+#define IDPF_TX_COMPLQ_PER_GRP 1
+#define IDPF_RXQ_PER_GRP 1
+#define IDPF_RX_BUFQ_PER_GRP 2
+#define IDPF_RX_BUF_STRIDE 64
+
+/* Maximum buffer lengths for all control queue types */
+#define IDPF_CTLQ_MAX_RING_SIZE 1024
+#define IDPF_CTLQ_MAX_BUF_LEN 4096
+
+#define IDPF_HI_DWORD(x) ((u32) ((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define IDPF_LO_DWORD(x) ((u32) ((x) &0xFFFFFFFF))
+#define IDPF_HI_WORD(x) ((u16) (((x) >> 16) & 0xFFFF))
+#define IDPF_LO_WORD(x) ((u16) ((x) &0xFFFF))
+
+#define IDPF_CTLQ_DESC(R, i) (&(((idpf_ctlq_desc_t *) ((R)->desc_ring.va))[i]))
+
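+/* number of free descriptors left in the ring (one slot is always kept unused) */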
+#define IDPF_CTLQ_DESC_UNUSED(R) \
+ (u16) ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->ring_size) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define IDPF_GET_PTYPE_SIZE(p) \
+ (sizeof (virtchnl2_ptype_t) + \
+ (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * \
+ sizeof ((p)->proto_id[0])))
+
+/* log configuration */
+extern vlib_log_class_registration_t idpf_log;
+extern vlib_log_class_registration_t idpf_stats_log;
+
+#define idpf_log_err(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_ERR, idpf_log.class, "%U: " f, \
+ format_vlib_pci_addr, &dev->pci_addr, ##__VA_ARGS__)
+
+#define idpf_log_warn(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_WARNING, idpf_log.class, "%U: " f, \
+ format_vlib_pci_addr, &dev->pci_addr, ##__VA_ARGS__)
+
+#define idpf_log_debug(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_DEBUG, idpf_log.class, "%U: " f, \
+ format_vlib_pci_addr, &dev->pci_addr, ##__VA_ARGS__)
+
+#define idpf_stats_log_debug(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_DEBUG, idpf_stats_log.class, "%U: " f, \
+ format_vlib_pci_addr, &dev->pci_addr, ##__VA_ARGS__)
+
+/* List handler */
+#ifndef LIST_HEAD_TYPE
+#define LIST_HEAD_TYPE(list_name, type) LIST_HEAD (list_name, type)
+#endif
+
+#ifndef LIST_ENTRY_TYPE
+#define LIST_ENTRY_TYPE(type) LIST_ENTRY (type)
+#endif
+
+#ifndef LIST_FOR_EACH_ENTRY_SAFE
+#define LIST_FOR_EACH_ENTRY_SAFE(pos, temp, head, entry_type, list) \
+ LIST_FOREACH (pos, head, list)
+#endif
+
+#ifndef LIST_FOR_EACH_ENTRY
+#define LIST_FOR_EACH_ENTRY(pos, head, entry_type, list) \
+ LIST_FOREACH (pos, head, list)
+#endif
+
+#define foreach_idpf_device_flags \
+ _ (0, INITIALIZED, "initialized") \
+ _ (1, ERROR, "error") \
+ _ (2, ADMIN_UP, "admin-up") \
+ _ (3, VA_DMA, "vaddr-dma") \
+ _ (4, LINK_UP, "link-up") \
+ _ (6, ELOG, "elog") \
+ _ (7, PROMISC, "promisc") \
+ _ (8, RX_INT, "rx-interrupts") \
+ _ (9, RX_FLOW_OFFLOAD, "rx-flow-offload")
+
+enum
+{
+#define _(a, b, c) IDPF_DEVICE_F_##b = (1 << a),
+ foreach_idpf_device_flags
+#undef _
+};
+
+#define IDPF_PTYPE_UNKNOWN 0x00000000
+#define IDPF_PTYPE_L2_ETHER 0x00000001
+#define IDPF_PTYPE_L2_ETHER_TIMESYNC 0x00000002
+#define IDPF_PTYPE_L2_ETHER_ARP 0x00000003
+#define IDPF_PTYPE_L2_ETHER_LLDP 0x00000004
+#define IDPF_PTYPE_L2_ETHER_NSH 0x00000005
+#define IDPF_PTYPE_L2_ETHER_VLAN 0x00000006
+#define IDPF_PTYPE_L2_ETHER_QINQ 0x00000007
+#define IDPF_PTYPE_L2_ETHER_PPPOE 0x00000008
+#define IDPF_PTYPE_L2_ETHER_FCOE 0x00000009
+#define IDPF_PTYPE_L2_ETHER_MPLS 0x0000000a
+#define IDPF_PTYPE_L2_MASK 0x0000000f
+#define IDPF_PTYPE_L3_IPV4 0x00000010
+#define IDPF_PTYPE_L3_IPV4_EXT 0x00000030
+#define IDPF_PTYPE_L3_IPV6 0x00000040
+#define IDPF_PTYPE_L3_IPV4_EXT_UNKNOWN 0x00000090
+#define IDPF_PTYPE_L3_IPV6_EXT 0x000000c0
+#define IDPF_PTYPE_L3_IPV6_EXT_UNKNOWN 0x000000e0
+#define IDPF_PTYPE_L3_MASK 0x000000f0
+#define IDPF_PTYPE_L4_TCP 0x00000100
+#define IDPF_PTYPE_L4_UDP 0x00000200
+#define IDPF_PTYPE_L4_FRAG 0x00000300
+#define IDPF_PTYPE_L4_SCTP 0x00000400
+#define IDPF_PTYPE_L4_ICMP 0x00000500
+#define IDPF_PTYPE_L4_NONFRAG 0x00000600
+#define IDPF_PTYPE_L4_IGMP 0x00000700
+#define IDPF_PTYPE_L4_MASK 0x00000f00
+#define IDPF_PTYPE_TUNNEL_IP 0x00001000
+#define IDPF_PTYPE_TUNNEL_GRE 0x00002000
+#define IDPF_PTYPE_TUNNEL_VXLAN 0x00003000
+#define IDPF_PTYPE_TUNNEL_NVGRE 0x00004000
+#define IDPF_PTYPE_TUNNEL_GENEVE 0x00005000
+#define IDPF_PTYPE_TUNNEL_GRENAT 0x00006000
+#define IDPF_PTYPE_TUNNEL_GTPC 0x00007000
+#define IDPF_PTYPE_TUNNEL_GTPU 0x00008000
+#define IDPF_PTYPE_TUNNEL_ESP 0x00009000
+#define IDPF_PTYPE_TUNNEL_L2TP 0x0000a000
+#define IDPF_PTYPE_TUNNEL_VXLAN_GPE 0x0000b000
+#define IDPF_PTYPE_TUNNEL_MPLS_IN_GRE 0x0000c000
+#define IDPF_PTYPE_TUNNEL_MPLS_IN_UDP 0x0000d000
+#define IDPF_PTYPE_TUNNEL_MASK 0x0000f000
+#define IDPF_PTYPE_INNER_L2_ETHER 0x00010000
+#define IDPF_PTYPE_INNER_L2_ETHER_VLAN 0x00020000
+#define IDPF_PTYPE_INNER_L2_ETHER_QINQ 0x00030000
+#define IDPF_PTYPE_INNER_L2_MASK 0x000f0000
+#define IDPF_PTYPE_INNER_L3_IPV4 0x00100000
+#define IDPF_PTYPE_INNER_L3_IPV4_EXT 0x00200000
+#define IDPF_PTYPE_INNER_L3_IPV6 0x00300000
+#define IDPF_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN 0x00400000
+#define IDPF_PTYPE_INNER_L3_IPV6_EXT 0x00500000
+#define IDPF_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN 0x00600000
+#define IDPF_PTYPE_INNER_L3_MASK 0x00f00000
+#define IDPF_PTYPE_INNER_L4_TCP 0x01000000
+#define IDPF_PTYPE_INNER_L4_UDP 0x02000000
+#define IDPF_PTYPE_INNER_L4_FRAG 0x03000000
+#define IDPF_PTYPE_INNER_L4_SCTP 0x04000000
+#define IDPF_PTYPE_INNER_L4_ICMP 0x05000000
+#define IDPF_PTYPE_INNER_L4_NONFRAG 0x06000000
+#define IDPF_PTYPE_INNER_L4_MASK 0x0f000000
+#define IDPF_PTYPE_ALL_MASK 0x0fffffff
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR| * RSV * |FTYPE | *RSV* |RD |VFC|BUF| HOST_ID |
+ */
+/* command flags and offsets */
+#define IDPF_CTLQ_FLAG_DD_S 0
+#define IDPF_CTLQ_FLAG_CMP_S 1
+#define IDPF_CTLQ_FLAG_ERR_S 2
+#define IDPF_CTLQ_FLAG_FTYPE_S 6
+#define IDPF_CTLQ_FLAG_RD_S 10
+#define IDPF_CTLQ_FLAG_VFC_S 11
+#define IDPF_CTLQ_FLAG_BUF_S 12
+#define IDPF_CTLQ_FLAG_HOST_ID_S 13
+
+#define IDPF_CTLQ_FLAG_DD BIT (IDPF_CTLQ_FLAG_DD_S) /* 0x1 */
+#define IDPF_CTLQ_FLAG_CMP BIT (IDPF_CTLQ_FLAG_CMP_S) /* 0x2 */
+#define IDPF_CTLQ_FLAG_ERR BIT (IDPF_CTLQ_FLAG_ERR_S) /* 0x4 */
+#define IDPF_CTLQ_FLAG_FTYPE_VM \
+ BIT (IDPF_CTLQ_FLAG_FTYPE_S) /* 0x40 */
+#define IDPF_CTLQ_FLAG_FTYPE_PF BIT (IDPF_CTLQ_FLAG_FTYPE_S + 1) /* 0x80 */
+#define IDPF_CTLQ_FLAG_RD BIT (IDPF_CTLQ_FLAG_RD_S) /* 0x400 */
+#define IDPF_CTLQ_FLAG_VFC BIT (IDPF_CTLQ_FLAG_VFC_S) /* 0x800 */
+#define IDPF_CTLQ_FLAG_BUF BIT (IDPF_CTLQ_FLAG_BUF_S) /* 0x1000 */
+
+/* Host ID is a special 3-bit field, not a 1-bit flag */
+#define IDPF_CTLQ_FLAG_HOST_ID_M MAKEMASK (0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S)
+
+#define IDPF_FLEX_TXD_QW1_DTYPE_S 0
+#define IDPF_FLEX_TXD_QW1_DTYPE_M MAKEMASK (0x1FUL, IDPF_FLEX_TXD_QW1_DTYPE_S)
+#define IDPF_FLEX_TXD_QW1_CMD_S 5
+#define IDPF_FLEX_TXD_QW1_CMD_M MAKEMASK (0x7FFUL, IDPF_FLEX_TXD_QW1_CMD_S)
+
+typedef struct idpf_vport idpf_vport_t;
+
+typedef volatile struct
+{
+ u64 buf_addr; /* Packet buffer address */
+ struct
+ {
+ u64 cmd_dtype;
+ union
+ {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */
+ u8 raw[4];
+
+ /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 (0x06) */
+ struct
+ {
+ u16 l2tag1;
+ u8 flex;
+ u8 tsync;
+ } tsync;
+
+ /* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
+ struct
+ {
+ u16 l2tag1;
+ u16 l2tag2;
+ } l2tags;
+ } flex;
+ u16 buf_size;
+ } qw1;
+} idpf_flex_tx_desc_t;
+
+typedef struct
+{
+ union
+ {
+ u64 qword[2];
+ };
+} idpf_tx_desc_t;
+
+STATIC_ASSERT_SIZEOF (idpf_tx_desc_t, 16);
+
+typedef struct idpf_rxq
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile u32 *qrx_tail;
+ u16 next;
+ u16 size;
+ virtchnl2_rx_desc_t *descs;
+ u32 *bufs;
+ u16 n_enqueued;
+ u8 int_mode;
+ u8 buffer_pool_index;
+ u32 queue_index;
+
+ struct idpf_rxq *bufq1;
+ struct idpf_rxq *bufq2;
+} idpf_rxq_t;
+
+typedef struct idpf_txq
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile u32 *qtx_tail;
+ u16 next;
+ u16 size;
+ u32 *ph_bufs;
+ clib_spinlock_t lock;
+ idpf_tx_desc_t *descs;
+ u32 *bufs;
+ u16 n_enqueued;
+ u16 *rs_slots;
+
+ idpf_tx_desc_t *tmp_descs;
+ u32 *tmp_bufs;
+ u32 queue_index;
+
+ struct idpf_txq *complq;
+} idpf_txq_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 flags;
+ u32 per_interface_next_index;
+ u32 cmd_retval;
+ u8 *mbx_resp;
+ virtchnl2_op_t pend_cmd;
+
+ u32 dev_instance;
+ u32 sw_if_index;
+ u32 hw_if_index;
+ vlib_pci_dev_handle_t pci_dev_handle;
+ u32 numa_node;
+ void *bar0;
+ u8 *name;
+
+ /* queues */
+ u16 n_tx_queues;
+ u16 n_rx_queues;
+ u32 txq_model;
+ u32 rxq_model;
+
+ u16 vsi_id;
+ u8 hwaddr[6];
+ u16 max_mtu;
+ vlib_pci_addr_t pci_addr;
+
+ /* error */
+ clib_error_t *error;
+
+ /* hw info */
+ u8 *hw_addr;
+ u64 hw_addr_len;
+
+ /* control queue - send and receive */
+ struct idpf_ctlq_info *asq;
+ struct idpf_ctlq_info *arq;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+
+ /* max config queue number per vc message */
+ u32 max_rxq_per_msg;
+ u32 max_txq_per_msg;
+
+ /* vport info */
+ idpf_vport_t **vports;
+ u16 max_vport_nb;
+ u16 req_vports[IDPF_MAX_VPORT_NUM];
+ u16 req_vport_nb;
+ u16 cur_vports;
+ u16 cur_vport_nb;
+ u16 cur_vport_idx;
+
+ u32 ptype_tbl[IDPF_MAX_PKT_TYPE];
+
+ /* device capability */
+ u32 csum_caps;
+ u32 seg_caps;
+ u32 hsplit_caps;
+ u32 rsc_caps;
+ u64 rss_caps;
+ u64 other_caps;
+
+ u16 max_rx_q;
+ u16 max_tx_q;
+ u16 max_rx_bufq;
+ u16 max_tx_complq;
+ u16 max_sriov_vfs;
+ u16 max_vports;
+ u16 default_num_vports;
+
+ u32 device_type;
+
+ LIST_HEAD_TYPE (list_head, idpf_ctlq_info) cq_list_head;
+} idpf_device_t;
+
+/* memory allocation tracking */
+typedef struct
+{
+ void *va;
+ u64 pa;
+ u32 size;
+} idpf_dma_mem_t;
+
+/* Message type read in virtual channel from PF */
+typedef enum
+{
+ IDPF_MSG_ERR = -1, /* Meet error when accessing admin queue */
+ IDPF_MSG_NON, /* Read nothing from admin queue */
+ IDPF_MSG_SYS, /* Read system msg from admin queue */
+ IDPF_MSG_CMD, /* Read async command result */
+} idpf_vc_result_t;
+
+typedef struct
+{
+ u32 tx_start_qid;
+ u32 rx_start_qid;
+ u32 tx_compl_start_qid;
+ u32 rx_buf_start_qid;
+
+ u64 tx_qtail_start;
+ u32 tx_qtail_spacing;
+ u64 rx_qtail_start;
+ u32 rx_qtail_spacing;
+ u64 tx_compl_qtail_start;
+ u32 tx_compl_qtail_spacing;
+ u64 rx_buf_qtail_start;
+ u32 rx_buf_qtail_spacing;
+} idpf_chunks_info_t;
+
+typedef struct
+{
+ u32 ops;
+ u8 *in_args; /* buffer for sending */
+ u32 in_args_size; /* buffer size for sending */
+ u8 *out_buffer; /* buffer for response */
+ u32 out_size; /* buffer size for response */
+} idpf_cmd_info_t;
+
+typedef struct
+{
+ idpf_device_t *id;
+ u16 idx;
+} idpf_vport_param_t;
+
+struct idpf_vport
+{
+ idpf_device_t *id;
+ virtchnl2_create_vport_t *vport_info;
+ u16 idx;
+ u16 vport_id;
+ u32 txq_model;
+ u32 rxq_model;
+ u32 num_tx_q;
+ idpf_txq_t *txqs;
+ u16 num_tx_complq;
+ u16 num_rx_q;
+ idpf_rxq_t *rxqs;
+ u16 num_rx_bufq;
+
+ u16 max_mtu;
+ u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
+
+ u16 max_pkt_len; /* Maximum packet length */
+
+ /* MSIX info*/
+ virtchnl2_queue_vector_t *qv_map; /* queue vector mapping */
+ u16 max_vectors;
+ virtchnl2_alloc_vectors_t *recv_vectors;
+
+ /* Chunk info */
+ idpf_chunks_info_t chunks_info;
+
+ virtchnl2_vport_stats_t eth_stats_offset;
+};
+
+#define IDPF_RX_VECTOR_SZ VLIB_FRAME_SIZE
+
+typedef enum
+{
+ IDPF_PROCESS_REQ_ADD_DEL_ETH_ADDR = 1,
+ IDPF_PROCESS_REQ_CONFIG_PROMISC_MDDE = 2,
+ IDPF_PROCESS_REQ_PROGRAM_FLOW = 3,
+} idpf_process_req_type_t;
+
+typedef struct
+{
+ idpf_process_req_type_t type;
+ u32 dev_instance;
+ u32 calling_process_index;
+ u8 eth_addr[6];
+ int is_add, is_enable;
+
+ /* below parameters are used for 'program flow' event */
+ u8 *rule;
+ u32 rule_len;
+ u8 *program_status;
+ u32 status_len;
+
+ clib_error_t *error;
+} idpf_process_req_t;
+
+typedef struct
+{
+ u64 qw1s[IDPF_RX_MAX_DESC_IN_CHAIN - 1];
+ u32 buffers[IDPF_RX_MAX_DESC_IN_CHAIN - 1];
+} idpf_rx_tail_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ vlib_buffer_t *bufs[IDPF_RX_VECTOR_SZ];
+ u16 next[IDPF_RX_VECTOR_SZ];
+ u64 qw1s[IDPF_RX_VECTOR_SZ];
+ u32 flow_ids[IDPF_RX_VECTOR_SZ];
+ idpf_rx_tail_t tails[IDPF_RX_VECTOR_SZ];
+ vlib_buffer_t buffer_template;
+} idpf_per_thread_data_t;
+
+typedef struct
+{
+ u16 msg_id_base;
+
+ idpf_device_t **devices;
+ idpf_per_thread_data_t *per_thread_data;
+} idpf_main_t;
+
+extern idpf_main_t idpf_main;
+
+typedef struct
+{
+ vlib_pci_addr_t addr;
+ u8 *name;
+ u16 rxq_single;
+ u16 txq_single;
+ u16 rxq_num;
+ u16 txq_num;
+ u16 req_vport_nb;
+ u16 rxq_size;
+ u16 txq_size;
+ int rv;
+ u32 sw_if_index;
+ clib_error_t *error;
+} idpf_create_if_args_t;
+
+void idpf_create_if (vlib_main_t *vm, idpf_create_if_args_t *args);
+
+extern vlib_node_registration_t idpf_process_node;
+extern vnet_device_class_t idpf_device_class;
+
+/* format.c */
+format_function_t format_idpf_device_name;
+format_function_t format_idpf_device_flags;
+
+static inline void
+clear_cmd (idpf_device_t *id)
+{
+  /* Return value may be checked in another thread, so we need to ensure
+   * coherence. */
+ CLIB_MEMORY_BARRIER ();
+ id->pend_cmd = VIRTCHNL2_OP_UNKNOWN;
+ id->cmd_retval = VIRTCHNL2_STATUS_SUCCESS;
+}
+
+static_always_inline idpf_device_t *
+idpf_get_device (u32 dev_instance)
+{
+ return pool_elt_at_index (idpf_main.devices, dev_instance)[0];
+}
+
+static inline void
+idpf_reg_write (idpf_device_t *id, u32 addr, u32 val)
+{
+ *(volatile u32 *) ((u8 *) id->bar0 + addr) = val;
+}
+
+static inline u32
+idpf_reg_read (idpf_device_t *id, u32 addr)
+{
+ u32 val = *(volatile u32 *) (id->bar0 + addr);
+ return val;
+}
+
+static inline void
+idpf_reg_flush (idpf_device_t *id)
+{
+ idpf_reg_read (id, PFGEN_RSTAT);
+ asm volatile("" ::: "memory");
+}
+
+typedef struct
+{
+ u16 qid;
+ u16 next_index;
+ u32 hw_if_index;
+ u32 flow_id;
+ u64 qw1s[IDPF_RX_MAX_DESC_IN_CHAIN];
+} idpf_input_trace_t;
+
+/* Error Codes */
+/* Linux kernel driver can't directly use these. Instead, they are mapped to
+ * linux compatible error codes which get translated in the build script.
+ */
+#define IDPF_SUCCESS 0
+#define IDPF_ERR_PARAM -53 /* -EBADR */
+#define IDPF_ERR_NOT_IMPL -95 /* -EOPNOTSUPP */
+#define IDPF_ERR_NOT_READY -16 /* -EBUSY */
+#define IDPF_ERR_BAD_PTR -14 /* -EFAULT */
+#define IDPF_ERR_INVAL_SIZE -90 /* -EMSGSIZE */
+#define IDPF_ERR_DEVICE_NOT_SUPPORTED -19 /* -ENODEV */
+#define IDPF_ERR_FW_API_VER -13 /* -EACCESS */
+#define IDPF_ERR_NO_MEMORY -12 /* -ENOMEM */
+#define IDPF_ERR_CFG -22 /* -EINVAL */
+#define IDPF_ERR_OUT_OF_RANGE -34 /* -ERANGE */
+#define IDPF_ERR_ALREADY_EXISTS -17 /* -EEXIST */
+#define IDPF_ERR_DOES_NOT_EXIST -6 /* -ENXIO */
+#define IDPF_ERR_IN_USE -114 /* -EALREADY */
+#define IDPF_ERR_MAX_LIMIT -109 /* -ETOOMANYREFS */
+#define IDPF_ERR_RESET_ONGOING -104 /* -ECONNRESET */
+
+/* CRQ/CSQ specific error codes */
+#define IDPF_ERR_CTLQ_ERROR -74 /* -EBADMSG */
+#define IDPF_ERR_CTLQ_TIMEOUT -110 /* -ETIMEDOUT */
+#define IDPF_ERR_CTLQ_FULL -28 /* -ENOSPC */
+#define IDPF_ERR_CTLQ_NO_WORK -42 /* -ENOMSG */
+#define IDPF_ERR_CTLQ_EMPTY -105 /* -ENOBUFS */
+
+/* Used for queue init, response and events */
+typedef enum
+{
+ IDPF_CTLQ_TYPE_MAILBOX_TX = 0,
+ IDPF_CTLQ_TYPE_MAILBOX_RX = 1,
+ IDPF_CTLQ_TYPE_CONFIG_TX = 2,
+ IDPF_CTLQ_TYPE_CONFIG_RX = 3,
+ IDPF_CTLQ_TYPE_EVENT_RX = 4,
+ IDPF_CTLQ_TYPE_RDMA_TX = 5,
+ IDPF_CTLQ_TYPE_RDMA_RX = 6,
+ IDPF_CTLQ_TYPE_RDMA_COMPL = 7
+} idpf_ctlq_type_t;
+
+typedef enum
+{
+ IDPF_PROCESS_EVENT_START = 1,
+ IDPF_PROCESS_EVENT_DELETE_IF = 2,
+ IDPF_PROCESS_EVENT_AQ_INT = 3,
+ IDPF_PROCESS_EVENT_REQ = 4,
+} idpf_process_event_t;
+
+/*
+ * Generic Control Queue Structures
+ */
+typedef struct
+{
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ /* Below applies only to default mb (if present) */
+ u32 len;
+ u32 bah;
+ u32 bal;
+ u32 len_mask;
+ u32 len_ena_mask;
+ u32 head_mask;
+} idpf_ctlq_reg_t;
+
+/* Generic queue msg structure */
+typedef struct
+{
+ u8 vmvf_type; /* represents the source of the message on recv */
+#define IDPF_VMVF_TYPE_VF 0
+#define IDPF_VMVF_TYPE_VM 1
+#define IDPF_VMVF_TYPE_PF 2
+ u8 host_id;
+ /* 3b field used only when sending a message to peer - to be used in
+ * combination with target func_id to route the message
+ */
+#define IDPF_HOST_ID_MASK 0x7
+
+ u16 opcode;
+ u16 data_len; /* data_len = 0 when no payload is attached */
+ union
+ {
+ u16 func_id; /* when sending a message */
+ u16 status; /* when receiving a message */
+ };
+ union
+ {
+ struct
+ {
+ u32 chnl_retval;
+ u32 chnl_opcode;
+ } mbx;
+ u64 cookie;
+ } cookie;
+ union
+ {
+#define IDPF_DIRECT_CTX_SIZE 16
+#define IDPF_INDIRECT_CTX_SIZE 8
+ /* 16 bytes of context can be provided or 8 bytes of context
+ * plus the address of a DMA buffer
+ */
+ u8 direct[IDPF_DIRECT_CTX_SIZE];
+ struct
+ {
+ u8 context[IDPF_INDIRECT_CTX_SIZE];
+ idpf_dma_mem_t *payload;
+ } indirect;
+ } ctx;
+} idpf_ctlq_msg_t;
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+typedef struct
+{
+ idpf_ctlq_type_t type;
+ int id; /* absolute queue offset passed as input
+ * -1 for default mailbox if present
+ */
+ u16 len; /* Queue length passed as input */
+ u16 buf_size; /* buffer size passed as input */
+ u64 base_address; /* output, HPA of the Queue start */
+ idpf_ctlq_reg_t reg; /* registers accessed by ctlqs */
+
+ int ext_info_size;
+ void *ext_info; /* Specific to q type */
+} idpf_ctlq_create_info_t;
+
+/* Control Queue information */
+typedef struct idpf_ctlq_info
+{
+ LIST_ENTRY_TYPE (idpf_ctlq_info) cq_list;
+
+ idpf_ctlq_type_t cq_type;
+ int q_id;
+ clib_spinlock_t cq_lock; /* queue lock */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_post;
+
+ idpf_dma_mem_t desc_ring; /* descriptor ring memory */
+
+ union
+ {
+ idpf_dma_mem_t **rx_buff;
+ idpf_ctlq_msg_t **tx_msg;
+ } bi;
+
+ u16 buf_size; /* queue buffer size */
+ u16 ring_size; /* Number of descriptors */
+ idpf_ctlq_reg_t reg; /* registers accessed by ctlqs */
+} idpf_ctlq_info_t;
+
+/* PF/VF mailbox commands */
+enum idpf_mbx_opc
+{
+ /* idpf_mbq_opc_send_msg_to_pf:
+ * usage: used by PF or VF to send a message to its CPF
+ * target: RX queue and function ID of parent PF taken from HW
+ */
+ idpf_mbq_opc_send_msg_to_pf = 0x0801,
+
+ /* idpf_mbq_opc_send_msg_to_vf:
+ * usage: used by PF to send message to a VF
+ * target: VF control queue ID must be specified in descriptor
+ */
+ idpf_mbq_opc_send_msg_to_vf = 0x0802,
+
+ /* idpf_mbq_opc_send_msg_to_peer_pf:
+ * usage: used by any function to send message to any peer PF
+ * target: RX queue and host of parent PF taken from HW
+ */
+ idpf_mbq_opc_send_msg_to_peer_pf = 0x0803,
+
+ /* idpf_mbq_opc_send_msg_to_peer_drv:
+ * usage: used by any function to send message to any peer driver
+ * target: RX queue and target host must be specified in descriptor
+ */
+ idpf_mbq_opc_send_msg_to_peer_drv = 0x0804,
+};
+
+typedef struct
+{
+ u16 flags;
+ u16 opcode;
+ u16 datalen; /* 0 for direct commands */
+ union
+ {
+ u16 ret_val;
+ u16 pfid_vfid;
+ };
+ u32 cookie_high;
+ u32 cookie_low;
+ union
+ {
+ struct
+ {
+ u32 param0;
+ u32 param1;
+ u32 param2;
+ u32 param3;
+ } direct;
+ struct
+ {
+ u32 param0;
+ u32 param1;
+ u32 addr_high;
+ u32 addr_low;
+ } indirect;
+ u8 raw[16];
+ } params;
+} idpf_ctlq_desc_t;
+
+int idpf_ctlq_init (vlib_main_t *vm, idpf_device_t *id, u8 num_q,
+ idpf_ctlq_create_info_t *q_info);
+int idpf_ctlq_add (vlib_main_t *vm, idpf_device_t *id,
+ idpf_ctlq_create_info_t *qinfo, struct idpf_ctlq_info **cq);
+void idpf_ctlq_remove (idpf_device_t *id, struct idpf_ctlq_info *cq);
+int idpf_ctlq_send (idpf_device_t *id, struct idpf_ctlq_info *cq,
+ u16 num_q_msg, idpf_ctlq_msg_t q_msg[]);
+int idpf_ctlq_recv (struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ idpf_ctlq_msg_t *q_msg);
+int idpf_ctlq_clean_sq (struct idpf_ctlq_info *cq, u16 *clean_count,
+ idpf_ctlq_msg_t *msg_status[]);
+int idpf_ctlq_post_rx_buffs (idpf_device_t *id, struct idpf_ctlq_info *cq,
+ u16 *buff_count, idpf_dma_mem_t **buffs);
+void idpf_ctlq_deinit (idpf_device_t *id);
+int idpf_ctlq_alloc_ring_res (vlib_main_t *vm, idpf_device_t *id,
+ struct idpf_ctlq_info *cq);
+void idpf_ctlq_dealloc_ring_res (idpf_device_t *id, struct idpf_ctlq_info *cq);
+void *idpf_alloc_dma_mem (vlib_main_t *vm, idpf_device_t *id,
+ idpf_dma_mem_t *mem, u64 size);
+void idpf_free_dma_mem (idpf_device_t *id, idpf_dma_mem_t *mem);
+
+#endif /* _IDPF_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/idpf/idpf_api.c b/src/plugins/idpf/idpf_api.c
new file mode 100644
index 00000000000..8ca78e62dc0
--- /dev/null
+++ b/src/plugins/idpf/idpf_api.c
@@ -0,0 +1,111 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <idpf/idpf.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+/* define message IDs */
+#include <idpf/idpf.api_enum.h>
+#include <idpf/idpf.api_types.h>
+
+#define REPLY_MSG_ID_BASE (im->msg_id_base)
+#include <vlibapi/api_helper_macros.h>
+
+static void
+vl_api_idpf_create_t_handler (vl_api_idpf_create_t *mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ idpf_main_t *im = &idpf_main;
+ vl_api_idpf_create_reply_t *rmp;
+ idpf_create_if_args_t args;
+ int rv;
+
+ clib_memset (&args, 0, sizeof (idpf_create_if_args_t));
+
+ args.addr.as_u32 = ntohl (mp->pci_addr);
+ args.rxq_single = ntohs (mp->rxq_single);
+ args.txq_single = ntohs (mp->txq_single);
+ args.rxq_num = ntohs (mp->rxq_num);
+ args.txq_num = ntohs (mp->txq_num);
+ args.rxq_size = ntohs (mp->rxq_size);
+ args.txq_size = ntohs (mp->txq_size);
+ args.req_vport_nb = ntohs (mp->req_vport_nb);
+
+ idpf_create_if (vm, &args);
+ rv = args.rv;
+
+ REPLY_MACRO2 (VL_API_IDPF_CREATE_REPLY,
+ ({ rmp->sw_if_index = ntohl (args.sw_if_index); }));
+}
+
+static void
+vl_api_idpf_delete_t_handler (vl_api_idpf_delete_t *mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+ idpf_main_t *im = &idpf_main;
+ vl_api_idpf_delete_reply_t *rmp;
+ vnet_hw_interface_t *hw;
+ int rv = 0;
+
+ hw = vnet_get_sup_hw_interface_api_visible_or_null (vnm,
+ htonl (mp->sw_if_index));
+ if (hw == NULL || idpf_device_class.index != hw->dev_class_index)
+ {
+ rv = VNET_API_ERROR_INVALID_INTERFACE;
+ goto reply;
+ }
+
+ vlib_process_signal_event (vm, idpf_process_node.index,
+ IDPF_PROCESS_EVENT_DELETE_IF, hw->dev_instance);
+
+reply:
+ REPLY_MACRO (VL_API_IDPF_DELETE_REPLY);
+}
+
+/* set up the API message handling tables */
+#include <idpf/idpf.api.c>
+static clib_error_t *
+idpf_plugin_api_hookup (vlib_main_t *vm)
+{
+ idpf_main_t *ivm = &idpf_main;
+ api_main_t *am = vlibapi_get_main ();
+
+ /* ask for a correctly-sized block of API message decode slots */
+ ivm->msg_id_base = setup_message_id_table ();
+
+ vl_api_set_msg_thread_safe (am, ivm->msg_id_base + VL_API_IDPF_DELETE, 1);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (idpf_plugin_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/idpf/idpf_controlq.c b/src/plugins/idpf/idpf_controlq.c
new file mode 100644
index 00000000000..4887bf71c86
--- /dev/null
+++ b/src/plugins/idpf/idpf_controlq.c
@@ -0,0 +1,890 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <idpf/idpf.h>
+
+/**
+ * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ */
+static int
+idpf_ctlq_alloc_desc_ring (vlib_main_t *vm, idpf_device_t *id,
+ struct idpf_ctlq_info *cq)
+{
+ size_t size = cq->ring_size * sizeof (idpf_ctlq_desc_t);
+
+ /* Fixme: alloc dma va */
+ cq->desc_ring.va = idpf_alloc_dma_mem (vm, id, &cq->desc_ring, size);
+ if (!cq->desc_ring.va)
+ return IDPF_ERR_NO_MEMORY;
+
+ return IDPF_SUCCESS;
+}
+
+/**
+ * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Allocate the buffer head for all control queues, and if it's a receive
+ * queue, allocate DMA buffers
+ */
+static int
+idpf_ctlq_alloc_bufs (vlib_main_t *vm, idpf_device_t *id,
+ struct idpf_ctlq_info *cq)
+{
+ int i = 0;
+ u16 len;
+
+ /* Do not allocate DMA buffers for transmit queues */
+ if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
+ return IDPF_SUCCESS;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+ len = cq->ring_size * sizeof (idpf_dma_mem_t *);
+ cq->bi.rx_buff = (idpf_dma_mem_t **) clib_mem_alloc (len);
+ if (!cq->bi.rx_buff)
+ return IDPF_ERR_NO_MEMORY;
+ clib_memset (cq->bi.rx_buff, 0, len);
+
+ /* allocate the mapped buffers (except for the last one) */
+ for (i = 0; i < cq->ring_size - 1; i++)
+ {
+ idpf_dma_mem_t *bi;
+ int num = 1; /* number of idpf_dma_mem to be allocated */
+
+ cq->bi.rx_buff[i] =
+ (idpf_dma_mem_t *) clib_mem_alloc (num * sizeof (idpf_dma_mem_t));
+ if (!cq->bi.rx_buff[i])
+ goto unwind_alloc_cq_bufs;
+
+ bi = cq->bi.rx_buff[i];
+
+ bi->va = idpf_alloc_dma_mem (vm, id, bi, cq->buf_size);
+ if (!bi->va)
+ {
+ /* unwind will not free the failed entry */
+ clib_mem_free (cq->bi.rx_buff[i]);
+ goto unwind_alloc_cq_bufs;
+ }
+ }
+
+ return IDPF_SUCCESS;
+
+unwind_alloc_cq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ {
+ idpf_free_dma_mem (id, cq->bi.rx_buff[i]);
+ clib_mem_free (cq->bi.rx_buff[i]);
+ }
+ clib_mem_free (cq->bi.rx_buff);
+
+ return IDPF_ERR_NO_MEMORY;
+}
+
+/**
+ * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ */
+static void
+idpf_ctlq_free_desc_ring (idpf_device_t *id, struct idpf_ctlq_info *cq)
+{
+ idpf_free_dma_mem (id, &cq->desc_ring);
+}
+
+/**
+ * idpf_ctlq_free_bufs - Free CQ buffer info elements
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
+ * queues. The upper layers are expected to manage freeing of TX DMA buffers
+ */
+static void
+idpf_ctlq_free_bufs (idpf_device_t *id, struct idpf_ctlq_info *cq)
+{
+ void *bi;
+
+ if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
+ {
+ int i;
+
+      /* free DMA buffers for rx queues */
+ for (i = 0; i < cq->ring_size; i++)
+ {
+ if (cq->bi.rx_buff[i])
+ {
+ idpf_free_dma_mem (id, cq->bi.rx_buff[i]);
+ /* Attention */
+ clib_mem_free (cq->bi.rx_buff[i]);
+ }
+ }
+
+ bi = (void *) cq->bi.rx_buff;
+ }
+ else
+ {
+ bi = (void *) cq->bi.tx_msg;
+ }
+
+ /* free the buffer header */
+ clib_mem_free (bi);
+}
+
+/**
+ * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Free the memory used by the ring, buffers and other related structures
+ */
+void
+idpf_ctlq_dealloc_ring_res (idpf_device_t *id, struct idpf_ctlq_info *cq)
+{
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_free_bufs (id, cq);
+ idpf_ctlq_free_desc_ring (id, cq);
+}
+
+/**
+ * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+int
+idpf_ctlq_alloc_ring_res (vlib_main_t *vm, idpf_device_t *id,
+ struct idpf_ctlq_info *cq)
+{
+ int ret_code;
+
+ /* verify input for valid configuration */
+ if (!cq->ring_size || !cq->buf_size)
+ return IDPF_ERR_CFG;
+
+ /* allocate the ring memory */
+ ret_code = idpf_ctlq_alloc_desc_ring (vm, id, cq);
+ if (ret_code)
+ return ret_code;
+
+ /* allocate buffers in the rings */
+ ret_code = idpf_ctlq_alloc_bufs (vm, id, cq);
+ if (ret_code)
+ goto idpf_init_cq_free_ring;
+
+ /* success! */
+ return IDPF_SUCCESS;
+
+idpf_init_cq_free_ring:
+ idpf_free_dma_mem (id, &cq->desc_ring);
+ return ret_code;
+}
+
+/**
+ * idpf_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+idpf_ctlq_setup_regs (struct idpf_ctlq_info *cq,
+ idpf_ctlq_create_info_t *q_create_info)
+{
+ /* set head and tail registers in our local struct */
+ cq->reg.head = q_create_info->reg.head;
+ cq->reg.tail = q_create_info->reg.tail;
+ cq->reg.len = q_create_info->reg.len;
+ cq->reg.bah = q_create_info->reg.bah;
+ cq->reg.bal = q_create_info->reg.bal;
+ cq->reg.len_mask = q_create_info->reg.len_mask;
+ cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+ cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * idpf_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+idpf_ctlq_init_regs (vlib_main_t *vm, idpf_device_t *id,
+ struct idpf_ctlq_info *cq, bool is_rxq)
+{
+ /* Update tail to post pre-allocated buffers for rx queues */
+ if (is_rxq)
+ idpf_reg_write (id, cq->reg.tail, (u32) (cq->ring_size - 1));
+
+  /* For non-Mailbox control queues only TAIL needs to be set */
+ if (cq->q_id != -1)
+ return;
+
+ /* Clear Head for both send or receive */
+ idpf_reg_write (id, cq->reg.head, 0);
+
+ /* set starting point */
+ idpf_reg_write (id, cq->reg.bal, IDPF_LO_DWORD (cq->desc_ring.pa));
+ idpf_reg_write (id, cq->reg.bah, IDPF_HI_DWORD (cq->desc_ring.pa));
+ idpf_reg_write (id, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+idpf_ctlq_init_rxq_bufs (struct idpf_ctlq_info *cq)
+{
+ int i = 0;
+
+ for (i = 0; i < cq->ring_size; i++)
+ {
+ idpf_ctlq_desc_t *desc = IDPF_CTLQ_DESC (cq, i);
+ idpf_dma_mem_t *bi = cq->bi.rx_buff[i];
+
+ /* No buffer to post to descriptor, continue */
+ if (!bi)
+ continue;
+
+ desc->flags = IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD;
+ desc->opcode = 0;
+ desc->datalen = (u16) bi->size;
+ desc->ret_val = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.indirect.addr_high = IDPF_HI_DWORD (bi->pa);
+ desc->params.indirect.addr_low = IDPF_LO_DWORD (bi->pa);
+ desc->params.indirect.param0 = 0;
+ desc->params.indirect.param1 = 0;
+ }
+}
+
+/**
+ * idpf_ctlq_shutdown - shutdown the CQ
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for any control queue
+ */
+static void
+idpf_ctlq_shutdown (idpf_device_t *id, struct idpf_ctlq_info *cq)
+{
+  clib_spinlock_lock (&cq->cq_lock);
+
+ if (!cq->ring_size)
+ goto shutdown_sq_out;
+
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_dealloc_ring_res (id, cq);
+
+ /* Set ring_size to 0 to indicate uninitialized queue */
+ cq->ring_size = 0;
+
+shutdown_sq_out:
+ clib_spinlock_unlock (&cq->cq_lock);
+ clib_spinlock_free (&cq->cq_lock);
+}
+
+/**
+ * idpf_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue
+ * list. The cq parameter will be allocated/initialized and passed back to the
+ * caller if no errors occur.
+ *
+ * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
+ */
+int
+idpf_ctlq_add (vlib_main_t *vm, idpf_device_t *id,
+ idpf_ctlq_create_info_t *qinfo, struct idpf_ctlq_info **cq_out)
+{
+ bool is_rxq = false;
+ int status = IDPF_SUCCESS;
+
+ if (!qinfo->len || !qinfo->buf_size ||
+ qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+ qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+ return IDPF_ERR_CFG;
+
+ /* Fixme: memory allocation */
+ *cq_out = vlib_physmem_alloc_aligned_on_numa (
+ vm, sizeof (struct idpf_ctlq_info), CLIB_CACHE_LINE_BYTES, id->numa_node);
+ if (!(*cq_out))
+ return IDPF_ERR_NO_MEMORY;
+
+ if ((vlib_pci_map_dma (vm, id->pci_dev_handle, *cq_out)))
+ {
+ status = IDPF_ERR_NO_MEMORY;
+ goto init_free_q;
+ }
+
+ (*cq_out)->cq_type = qinfo->type;
+ (*cq_out)->q_id = qinfo->id;
+ (*cq_out)->buf_size = qinfo->buf_size;
+ (*cq_out)->ring_size = qinfo->len;
+
+ (*cq_out)->next_to_use = 0;
+ (*cq_out)->next_to_clean = 0;
+ (*cq_out)->next_to_post = (*cq_out)->ring_size - 1;
+
+ switch (qinfo->type)
+ {
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+      is_rxq = true;
+      /* fall through: Rx mailbox queue shares ring allocation with Tx */
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ status = idpf_ctlq_alloc_ring_res (vm, id, *cq_out);
+ break;
+ default:
+ status = IDPF_ERR_PARAM;
+ break;
+ }
+
+ if (status)
+ goto init_free_q;
+
+ if (is_rxq)
+ {
+ idpf_ctlq_init_rxq_bufs (*cq_out);
+ }
+ else
+ {
+ /* Allocate the array of msg pointers for TX queues */
+ (*cq_out)->bi.tx_msg = (idpf_ctlq_msg_t **) clib_mem_alloc (
+ qinfo->len * sizeof (idpf_ctlq_msg_t *));
+ if (!(*cq_out)->bi.tx_msg)
+ {
+ status = IDPF_ERR_NO_MEMORY;
+ goto init_dealloc_q_mem;
+ }
+ }
+
+ idpf_ctlq_setup_regs (*cq_out, qinfo);
+
+ idpf_ctlq_init_regs (vm, id, *cq_out, is_rxq);
+
+  /* Fixme: lock issue */
+ clib_spinlock_init (&(*cq_out)->cq_lock);
+
+ LIST_INSERT_HEAD (&id->cq_list_head, (*cq_out), cq_list);
+
+ return status;
+
+init_dealloc_q_mem:
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_dealloc_ring_res (id, *cq_out);
+init_free_q:
+ clib_mem_free (*cq_out);
+
+ return status;
+}
+
+/**
+ * idpf_ctlq_remove - deallocate and remove specified control queue
+ * @id: pointer to device struct
+ * @cq: pointer to control queue to be removed
+ */
+void
+idpf_ctlq_remove (idpf_device_t *id, struct idpf_ctlq_info *cq)
+{
+ LIST_REMOVE (cq, cq_list);
+ idpf_ctlq_shutdown (id, cq);
+ clib_mem_free (cq);
+}
+
+/**
+ * idpf_ctlq_init - main initialization routine for all control queues
+ * @id: pointer to device struct
+ * @num_q: number of queues to initialize
+ * @q_info: array of structs containing info for each queue to be initialized
+ *
+ * This initializes any number and any type of control queues. This is an all
+ * or nothing routine; if one fails, all previously allocated queues will be
+ * destroyed. This must be called prior to using the individual add/remove
+ * APIs.
+ */
+int
+idpf_ctlq_init (vlib_main_t *vm, idpf_device_t *id, u8 num_q,
+ idpf_ctlq_create_info_t *q_info)
+{
+ struct idpf_ctlq_info *cq = NULL;
+ int ret_code = IDPF_SUCCESS;
+ int i = 0;
+
+ LIST_INIT (&id->cq_list_head);
+
+ for (i = 0; i < num_q; i++)
+ {
+ idpf_ctlq_create_info_t *qinfo = q_info + i;
+
+ ret_code = idpf_ctlq_add (vm, id, qinfo, &cq);
+ if (ret_code)
+ goto init_destroy_qs;
+ }
+
+ return ret_code;
+
+init_destroy_qs:
+ LIST_FOR_EACH_ENTRY_SAFE (cq, NULL, &id->cq_list_head, struct idpf_ctlq_info,
+ cq_list)
+ {
+ idpf_ctlq_remove (id, cq);
+ }
+
+ return ret_code;
+}
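/*
 * A minimal usage sketch of the init path above, assuming a mailbox TX/RX
 * pair. The ring length, buffer size and error handling are illustrative
 * placeholders, and the register-offset members of idpf_ctlq_create_info_t
 * (consumed by idpf_ctlq_setup_regs) still need to be filled in by the
 * caller; this may differ from the wiring used elsewhere in the plugin.
 */
static clib_error_t *
idpf_mbx_init_sketch (vlib_main_t *vm, idpf_device_t *id)
{
  idpf_ctlq_create_info_t q_info[2] = {
    {
      .type = IDPF_CTLQ_TYPE_MAILBOX_TX,
      .id = -1,	       /* -1 marks the default mailbox queue (see q_id use) */
      .len = 64,       /* hypothetical ring length, <= IDPF_CTLQ_MAX_RING_SIZE */
      .buf_size = 4096, /* hypothetical buffer size, <= IDPF_CTLQ_MAX_BUF_LEN */
    },
    {
      .type = IDPF_CTLQ_TYPE_MAILBOX_RX,
      .id = -1,
      .len = 64,
      .buf_size = 4096,
    },
  };

  /* All-or-nothing: on any failure the queues added so far are destroyed */
  if (idpf_ctlq_init (vm, id, 2, q_info) != IDPF_SUCCESS)
    return clib_error_return (0, "idpf mailbox control queue init failed");

  return 0;
}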
+
+/**
+ * idpf_ctlq_deinit - destroy all control queues
+ * @id: pointer to device struct
+ */
+void
+idpf_ctlq_deinit (idpf_device_t *id)
+{
+ struct idpf_ctlq_info *cq = NULL;
+
+ LIST_FOR_EACH_ENTRY_SAFE (cq, NULL, &id->cq_list_head, struct idpf_ctlq_info,
+ cq_list)
+ {
+ idpf_ctlq_remove (id, cq);
+ }
+}
+
+/**
+ * idpf_ctlq_send - send command to Control Queue (CTQ)
+ * @id: pointer to device struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+idpf_ctlq_send (idpf_device_t *id, struct idpf_ctlq_info *cq, u16 num_q_msg,
+ idpf_ctlq_msg_t q_msg[])
+{
+ idpf_ctlq_desc_t *desc;
+ int num_desc_avail = 0;
+ int status = IDPF_SUCCESS;
+ int i = 0;
+
+ if (!cq || !cq->ring_size)
+ return -ENOBUFS;
+
+ clib_spinlock_lock (&cq->cq_lock);
+
+ /* Ensure there are enough descriptors to send all messages */
+ num_desc_avail = IDPF_CTLQ_DESC_UNUSED (cq);
+ if (num_desc_avail == 0 || num_desc_avail < num_q_msg)
+ {
+ status = -ENOSPC;
+ goto sq_send_command_out;
+ }
+
+ for (i = 0; i < num_q_msg; i++)
+ {
+ idpf_ctlq_msg_t *msg = &q_msg[i];
+ u64 msg_cookie;
+
+ desc = IDPF_CTLQ_DESC (cq, cq->next_to_use);
+
+      /* Note: descriptor fields are little-endian (CPU_TO_LE16) */
+ desc->opcode = msg->opcode;
+ desc->pfid_vfid = msg->func_id;
+
+ msg_cookie = msg->cookie.cookie;
+ desc->cookie_high = IDPF_HI_DWORD (msg_cookie);
+ desc->cookie_low = IDPF_LO_DWORD (msg_cookie);
+
+ desc->flags = (msg->host_id & IDPF_HOST_ID_MASK)
+ << IDPF_CTLQ_FLAG_HOST_ID_S;
+ if (msg->data_len)
+ {
+ idpf_dma_mem_t *buff = msg->ctx.indirect.payload;
+
+ desc->datalen |= msg->data_len;
+ desc->flags |= IDPF_CTLQ_FLAG_BUF;
+ desc->flags |= IDPF_CTLQ_FLAG_RD;
+
+ /* Update the address values in the desc with the pa
+ * value for respective buffer
+ */
+ desc->params.indirect.addr_high = IDPF_HI_DWORD (buff->pa);
+ desc->params.indirect.addr_low = IDPF_LO_DWORD (buff->pa);
+
+ clib_memcpy (&desc->params, msg->ctx.indirect.context,
+ IDPF_INDIRECT_CTX_SIZE);
+ }
+ else
+ {
+ clib_memcpy (&desc->params, msg->ctx.direct, IDPF_DIRECT_CTX_SIZE);
+ }
+
+ /* Store buffer info */
+ cq->bi.tx_msg[cq->next_to_use] = msg;
+
+ (cq->next_to_use)++;
+ if (cq->next_to_use == cq->ring_size)
+ cq->next_to_use = 0;
+ }
+
+ /* Force memory write to complete before letting hardware
+ * know that there are new descriptors to fetch.
+ */
+ CLIB_MEMORY_BARRIER ();
+
+ idpf_reg_write (id, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+ clib_spinlock_unlock (&cq->cq_lock);
+
+ return status;
+}
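/*
 * A minimal send-side sketch, assuming the caller already holds the mailbox
 * TX control queue and a DMA buffer carrying a pre-filled virtchnl2 request.
 * Field usage mirrors idpf_ctlq_send() above; the helper name, the opcode
 * choice and the absence of cookie/host_id handling are illustrative only.
 */
static int
idpf_send_get_caps_sketch (idpf_device_t *id, struct idpf_ctlq_info *asq,
			   idpf_ctlq_msg_t *msg, idpf_dma_mem_t *caps_buf)
{
  clib_memset (msg, 0, sizeof (*msg));

  msg->opcode = VIRTCHNL2_OP_GET_CAPS;
  msg->func_id = 0;
  msg->data_len = sizeof (virtchnl2_get_capabilities_t);
  msg->ctx.indirect.payload = caps_buf; /* descriptor gets caps_buf->pa */

  /* msg must stay valid until its completion is reclaimed via
   * idpf_ctlq_clean_sq(), since the queue stores a pointer to it */
  return idpf_ctlq_send (id, asq, 1, msg);
}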
+
+/**
+ * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors. The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+idpf_ctlq_clean_sq (struct idpf_ctlq_info *cq, u16 *clean_count,
+ idpf_ctlq_msg_t *msg_status[])
+{
+ idpf_ctlq_desc_t *desc;
+ u16 i = 0, num_to_clean;
+ u16 ntc, desc_err;
+ int ret = IDPF_SUCCESS;
+
+ if (!cq || !cq->ring_size)
+ return IDPF_ERR_CTLQ_EMPTY;
+
+ if (*clean_count == 0)
+ return IDPF_SUCCESS;
+ if (*clean_count > cq->ring_size)
+ return IDPF_ERR_PARAM;
+
+ /* Fixme rte func */
+ clib_spinlock_lock (&cq->cq_lock);
+
+ ntc = cq->next_to_clean;
+
+ num_to_clean = *clean_count;
+
+ for (i = 0; i < num_to_clean; i++)
+ {
+ /* Fetch next descriptor and check if marked as done */
+ desc = IDPF_CTLQ_DESC (cq, ntc);
+ if (!(desc->flags & IDPF_CTLQ_FLAG_DD))
+ break;
+
+ desc_err = desc->ret_val;
+ if (desc_err)
+ {
+ /* strip off FW internal code */
+ desc_err &= 0xff;
+ }
+
+ msg_status[i] = cq->bi.tx_msg[ntc];
+ msg_status[i]->status = desc_err;
+
+ cq->bi.tx_msg[ntc] = NULL;
+
+ /* Zero out any stale data */
+ clib_memset (desc, 0, sizeof (*desc));
+
+ ntc++;
+ if (ntc == cq->ring_size)
+ ntc = 0;
+ }
+
+ cq->next_to_clean = ntc;
+
+ clib_spinlock_unlock (&cq->cq_lock);
+
+ /* Return number of descriptors actually cleaned */
+ *clean_count = i;
+
+ return ret;
+}
+
+/**
+ * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @id: pointer to device struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+idpf_ctlq_post_rx_buffs (idpf_device_t *id, struct idpf_ctlq_info *cq,
+ u16 *buff_count, idpf_dma_mem_t **buffs)
+{
+ idpf_ctlq_desc_t *desc;
+ u16 ntp = cq->next_to_post;
+ bool buffs_avail = false;
+ u16 tbp = ntp + 1;
+ int status = IDPF_SUCCESS;
+ int i = 0;
+
+ if (*buff_count > cq->ring_size)
+ return IDPF_ERR_PARAM;
+
+ if (*buff_count > 0)
+ buffs_avail = true;
+
+ clib_spinlock_lock (&cq->cq_lock);
+
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+
+ if (tbp == cq->next_to_clean)
+ /* Nothing to do */
+ goto post_buffs_out;
+
+ /* Post buffers for as many as provided or up until the last one used */
+ while (ntp != cq->next_to_clean)
+ {
+ desc = IDPF_CTLQ_DESC (cq, ntp);
+
+ if (cq->bi.rx_buff[ntp])
+ goto fill_desc;
+ if (!buffs_avail)
+ {
+ /* If the caller hasn't given us any buffers or
+ * there are none left, search the ring itself
+ * for an available buffer to move to this
+ * entry starting at the next entry in the ring
+ */
+ tbp = ntp + 1;
+
+ /* Wrap ring if necessary */
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+
+ while (tbp != cq->next_to_clean)
+ {
+ if (cq->bi.rx_buff[tbp])
+ {
+ cq->bi.rx_buff[ntp] = cq->bi.rx_buff[tbp];
+ cq->bi.rx_buff[tbp] = NULL;
+
+ /* Found a buffer, no need to
+ * search anymore
+ */
+ break;
+ }
+
+ /* Wrap ring if necessary */
+ tbp++;
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+ }
+
+ if (tbp == cq->next_to_clean)
+ goto post_buffs_out;
+ }
+ else
+ {
+ /* Give back pointer to DMA buffer */
+ cq->bi.rx_buff[ntp] = buffs[i];
+ i++;
+
+ if (i >= *buff_count)
+ buffs_avail = false;
+ }
+
+ fill_desc:
+ desc->flags = IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD;
+
+ /* Post buffers to descriptor */
+ desc->datalen = cq->bi.rx_buff[ntp]->size;
+ desc->params.indirect.addr_high =
+ IDPF_HI_DWORD (cq->bi.rx_buff[ntp]->pa);
+ desc->params.indirect.addr_low = IDPF_LO_DWORD (cq->bi.rx_buff[ntp]->pa);
+
+ ntp++;
+ if (ntp == cq->ring_size)
+ ntp = 0;
+ }
+
+post_buffs_out:
+ /* Only update tail if buffers were actually posted */
+ if (cq->next_to_post != ntp)
+ {
+ if (ntp)
+ /* Update next_to_post to ntp - 1 since current ntp
+ * will not have a buffer
+ */
+ cq->next_to_post = ntp - 1;
+      else
+	/* Wrap to end of ring since current ntp is 0 */
+	cq->next_to_post = cq->ring_size - 1;
+
+ idpf_reg_write (id, cq->reg.tail, cq->next_to_post);
+ }
+
+ clib_spinlock_unlock (&cq->cq_lock);
+
+ /* return the number of buffers that were not posted */
+ *buff_count = *buff_count - i;
+
+ return status;
+}
+
+/**
+ * idpf_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+idpf_ctlq_recv (struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ idpf_ctlq_msg_t *q_msg)
+{
+ u16 num_to_clean, ntc, ret_val, flags;
+ idpf_ctlq_desc_t *desc;
+ int ret_code = 0;
+ u16 i = 0;
+
+ if (!cq || !cq->ring_size)
+ return -ENOBUFS;
+
+ if (*num_q_msg == 0)
+ return 0;
+ else if (*num_q_msg > cq->ring_size)
+ return -EINVAL;
+
+ /* Fixme: take the lock before we start messing with the ring */
+ clib_spinlock_lock (&cq->cq_lock);
+
+ ntc = cq->next_to_clean;
+
+ num_to_clean = *num_q_msg;
+
+ for (i = 0; i < num_to_clean; i++)
+ {
+ u64 msg_cookie;
+
+ /* Fetch next descriptor and check if marked as done */
+ desc = IDPF_CTLQ_DESC (cq, ntc);
+ flags = desc->flags;
+
+ if (!(flags & IDPF_CTLQ_FLAG_DD))
+ break;
+
+ ret_val = desc->ret_val;
+
+ q_msg[i].vmvf_type =
+ (flags & (IDPF_CTLQ_FLAG_FTYPE_VM | IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+ IDPF_CTLQ_FLAG_FTYPE_S;
+
+ if (flags & IDPF_CTLQ_FLAG_ERR)
+ ret_code = IDPF_ERR_CTLQ_ERROR;
+
+ msg_cookie = (u64) desc->cookie_high << 32;
+ msg_cookie |= (u64) desc->cookie_low;
+ clib_memcpy_fast (&q_msg[i].cookie, &msg_cookie, sizeof (u64));
+
+ q_msg[i].opcode = desc->opcode;
+ q_msg[i].data_len = desc->datalen;
+ q_msg[i].status = ret_val;
+
+ if (desc->datalen)
+ {
+ clib_memcpy_fast (q_msg[i].ctx.indirect.context,
+ &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);
+
+ /* Assign pointer to dma buffer to ctlq_msg array
+ * to be given to upper layer
+ */
+ q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+ /* Zero out pointer to DMA buffer info;
+ * will be repopulated by post buffers API
+ */
+ cq->bi.rx_buff[ntc] = NULL;
+ }
+ else
+ {
+ clib_memcpy_fast (q_msg[i].ctx.direct, desc->params.raw,
+ IDPF_DIRECT_CTX_SIZE);
+ }
+
+ /* Zero out stale data in descriptor */
+ clib_memset (desc, 0, sizeof (idpf_ctlq_desc_t));
+
+ ntc++;
+ if (ntc == cq->ring_size)
+ ntc = 0;
+    }
+
+ cq->next_to_clean = ntc;
+
+ /* Fixme */
+ clib_spinlock_unlock (&cq->cq_lock);
+
+ *num_q_msg = i;
+ if (*num_q_msg == 0)
+ ret_code = -ENOMSG;
+
+ return ret_code;
+}
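/*
 * A minimal polling sketch tying the helpers in this file together: drain one
 * completed message with idpf_ctlq_recv(), return its DMA buffer with
 * idpf_ctlq_post_rx_buffs(), and reclaim sent descriptors with
 * idpf_ctlq_clean_sq(). Message dispatch and error handling are elided; arq
 * and asq are assumed to be the mailbox queues created by idpf_ctlq_init().
 */
static void
idpf_mbx_poll_sketch (idpf_device_t *id, struct idpf_ctlq_info *arq,
		      struct idpf_ctlq_info *asq)
{
  idpf_ctlq_msg_t msg;
  idpf_ctlq_msg_t *done[1];
  u16 n = 1;

  if (idpf_ctlq_recv (arq, &n, &msg) == 0)
    {
      /* ... dispatch on msg.opcode / msg.status here ... */

      if (msg.data_len)
	{
	  /* Hand the consumed DMA buffer back to the RX descriptor ring */
	  idpf_dma_mem_t *buf = msg.ctx.indirect.payload;
	  u16 n_bufs = 1;
	  idpf_ctlq_post_rx_buffs (id, arq, &n_bufs, &buf);
	}
      else
	{
	  /* Direct command: the post-buffers call is still required (see
	   * the note on idpf_ctlq_post_rx_buffs above) */
	  u16 n_bufs = 0;
	  idpf_ctlq_post_rx_buffs (id, arq, &n_bufs, NULL);
	}
    }

  /* Drop references held on previously sent messages */
  n = 1;
  idpf_ctlq_clean_sq (asq, &n, done);
}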
diff --git a/src/plugins/idpf/idpf_test.c b/src/plugins/idpf/idpf_test.c
new file mode 100644
index 00000000000..85b12966681
--- /dev/null
+++ b/src/plugins/idpf/idpf_test.c
@@ -0,0 +1,169 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vppinfra/error.h>
+#include <idpf/idpf.h>
+
+#define __plugin_msg_base idpf_test_main.msg_id_base
+#include <vlibapi/vat_helper_macros.h>
+
+/* declare message IDs */
+#include <idpf/idpf.api_enum.h>
+#include <idpf/idpf.api_types.h>
+
+typedef struct
+{
+ /* API message ID base */
+ u16 msg_id_base;
+ vat_main_t *vat_main;
+} idpf_test_main_t;
+
+idpf_test_main_t idpf_test_main;
+
+/* idpf create API */
+static int
+api_idpf_create (vat_main_t *vam)
+{
+ unformat_input_t *i = vam->input;
+ vl_api_idpf_create_t *mp;
+ idpf_create_if_args_t args;
+ uint32_t tmp;
+ int ret;
+ u32 x[4];
+
+ clib_memset (&args, 0, sizeof (idpf_create_if_args_t));
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "%x:%x:%x.%x", &x[0], &x[1], &x[2], &x[3]))
+ {
+ args.addr.domain = x[0];
+ args.addr.bus = x[1];
+ args.addr.slot = x[2];
+ args.addr.function = x[3];
+ }
+ else if (unformat (i, "rx-single %u", &tmp))
+ args.rxq_single = 1;
+ else if (unformat (i, "tx-single %u", &tmp))
+ args.txq_single = 1;
+ else if (unformat (i, "rxq-size %u", &tmp))
+ args.rxq_size = tmp;
+ else if (unformat (i, "txq-size %u", &tmp))
+ args.txq_size = tmp;
+ else if (unformat (i, "rxq-num %u", &tmp))
+ args.rxq_num = tmp;
+ else if (unformat (i, "txq-num %u", &tmp))
+ args.txq_num = tmp;
+ else if (unformat (i, "vport-num %u", &tmp))
+ args.req_vport_nb = tmp;
+ else
+ {
+ clib_warning ("unknown input '%U'", format_unformat_error, i);
+ return -99;
+ }
+ }
+
+ M (IDPF_CREATE, mp);
+
+ mp->pci_addr = clib_host_to_net_u32 (args.addr.as_u32);
+ mp->rxq_single = clib_host_to_net_u16 (args.rxq_single);
+ mp->txq_single = clib_host_to_net_u16 (args.txq_single);
+ mp->rxq_num = clib_host_to_net_u16 (args.rxq_num);
+ mp->txq_num = clib_host_to_net_u16 (args.txq_num);
+ mp->rxq_size = clib_host_to_net_u16 (args.rxq_size);
+ mp->txq_size = clib_host_to_net_u16 (args.txq_size);
+ mp->req_vport_nb = clib_host_to_net_u16 (args.req_vport_nb);
+
+ S (mp);
+ W (ret);
+
+ return ret;
+}
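/*
 * With the parser above, the corresponding binary-API test command can be
 * invoked along these lines (illustrative; option names are taken from the
 * unformat calls above):
 *
 *   idpf_create 0000:4b:00.0 vport-num 1 rxq-num 2 txq-num 2 rx-single 1
 */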
+
+/* idpf-create reply handler */
+static void
+vl_api_idpf_create_reply_t_handler (vl_api_idpf_create_reply_t *mp)
+{
+ vat_main_t *vam = idpf_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ if (retval == 0)
+ {
+ fformat (vam->ofp, "created idpf with sw_if_index %d\n",
+ ntohl (mp->sw_if_index));
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+ vam->regenerate_interface_table = 1;
+}
+
+/* idpf delete API */
+static int
+api_idpf_delete (vat_main_t *vam)
+{
+ unformat_input_t *i = vam->input;
+ vl_api_idpf_delete_t *mp;
+ u32 sw_if_index = 0;
+ u8 index_defined = 0;
+ int ret;
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "sw_if_index %u", &sw_if_index))
+ index_defined = 1;
+ else
+ {
+ clib_warning ("unknown input '%U'", format_unformat_error, i);
+ return -99;
+ }
+ }
+
+ if (!index_defined)
+ {
+ errmsg ("missing sw_if_index\n");
+ return -99;
+ }
+
+ M (IDPF_DELETE, mp);
+
+ mp->sw_if_index = clib_host_to_net_u32 (sw_if_index);
+
+ S (mp);
+ W (ret);
+
+ return ret;
+}
+
+#include <idpf/idpf.api_test.c>
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/idpf/plugin.c b/src/plugins/idpf/plugin.c
new file mode 100644
index 00000000000..9bbce64a3ce
--- /dev/null
+++ b/src/plugins/idpf/plugin.c
@@ -0,0 +1,34 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description =
+ "Intel Infrastructure Data Path Function (IDPF) Device Driver",
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/idpf/virtchnl2.h b/src/plugins/idpf/virtchnl2.h
new file mode 100644
index 00000000000..8db68483f22
--- /dev/null
+++ b/src/plugins/idpf/virtchnl2.h
@@ -0,0 +1,855 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef _IDPF_VIRTCHNL_H_
+#define _IDPF_VIRTCHNL_H_
+
+#include <idpf/virtchnl2_lan_desc.h>
+
+#define foreach_virtchnl2_status \
+ _ (0, SUCCESS) \
+ _ (-5, ERR_PARAM) \
+ _ (-38, ERR_OPCODE_MISMATCH)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_STATUS_##n = v,
+ foreach_virtchnl2_status
+#undef _
+} virtchnl2_status_t;
+
+#define foreach_virtchnl2_op \
+ _ (0, UNKNOWN) \
+ _ (1, VERSION) \
+ _ (500, GET_CAPS) \
+ _ (501, CREATE_VPORT) \
+ _ (502, DESTROY_VPORT) \
+ _ (503, ENABLE_VPORT) \
+ _ (504, DISABLE_VPORT) \
+ _ (505, CONFIG_TX_QUEUES) \
+ _ (506, CONFIG_RX_QUEUES) \
+ _ (507, ENABLE_QUEUES) \
+ _ (508, DISABLE_QUEUES) \
+ _ (509, ADD_QUEUES) \
+ _ (510, DEL_QUEUES) \
+ _ (511, MAP_QUEUE_VECTOR) \
+ _ (512, UNMAP_QUEUE_VECTOR) \
+ _ (513, GET_RSS_KEY) \
+ _ (514, SET_RSS_KEY) \
+ _ (515, GET_RSS_LUT) \
+ _ (516, SET_RSS_LUT) \
+ _ (517, GET_RSS_HASH) \
+ _ (518, SET_RSS_HASH) \
+ _ (519, SET_SRIOV_VFS) \
+ _ (520, ALLOC_VECTORS) \
+ _ (521, DEALLOC_VECTORS) \
+ _ (522, EVENT) \
+ _ (523, GET_STATS) \
+ _ (524, RESET_VF) \
+ _ (526, GET_PTYPE_INFO) \
+ _ (532, CREATE_ADI) \
+ _ (533, DESTROY_ADI) \
+ _ (534, LOOPBACK) \
+ _ (535, ADD_MAC_ADDR) \
+ _ (536, DEL_MAC_ADDR) \
+ _ (537, CONFIG_PROMISCUOUS_MODE)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_OP_##n = v,
+ foreach_virtchnl2_op
+#undef _
+} virtchnl2_op_t;
+
+/* VIRTCHNL2_VPORT_TYPE
+ * Type of virtual port
+ */
+#define foreach_virtchnl2_vport_type \
+ _ (0, DEFAULT) \
+ _ (1, SRIOV) \
+ _ (2, SIOV) \
+ _ (3, SUBDEV) \
+ _ (4, MNG)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_VPORT_TYPE_##n = v,
+ foreach_virtchnl2_vport_type
+#undef _
+} virtchnl2_vport_type_t;
+
+/* VIRTCHNL2_QUEUE_MODEL
+ * Type of queue model
+ */
+#define VIRTCHNL2_QUEUE_MODEL_SINGLE 0
+#define VIRTCHNL2_QUEUE_MODEL_SPLIT 1
+
+#define foreach_idpf_checksum_cap_flag \
+ _ (0, TX_CSUM_L3_IPV4, "tx-csum-l3-ipv4") \
+ _ (1, TX_CSUM_L4_IPV4_TCP, "tx-csum-l4-ipv4-tcp") \
+ _ (2, TX_CSUM_L4_IPV4_UDP, "tx-csum-l4-ipv4-udp") \
+ _ (3, TX_CSUM_L4_IPV4_SCTP, "tx-csum-l4-ipv4-sctp") \
+ _ (4, TX_CSUM_L4_IPV6_TCP, "tx-csum-l4-ipv6-tcp") \
+ _ (5, TX_CSUM_L4_IPV6_UDP, "tx-csum-l4-ipv6-udp") \
+ _ (6, TX_CSUM_L4_IPV6_SCTP, "tx-csum-l4-ipv6-sctp") \
+ _ (7, TX_CSUM_GENERIC, "tx-csum-generic") \
+ _ (8, RX_CSUM_L3_IPV4, "rx-csum-l3-ipv4") \
+ _ (9, RX_CSUM_L4_IPV4_TCP, "rx-csum-l4-ipv4-tcp") \
+ _ (10, RX_CSUM_L4_IPV4_UDP, "rx-csum-l4-ipv4-udp") \
+ _ (11, RX_CSUM_L4_IPV4_SCTP, "rx-csum-l4-ipv4-sctp") \
+ _ (12, RX_CSUM_L4_IPV6_TCP, "rx-csum-l4-ipv6-tcp") \
+ _ (13, RX_CSUM_L4_IPV6_UDP, "rx-csum-l4-ipv6-udp") \
+ _ (14, RX_CSUM_L4_IPV6_SCTP, "rx-csum-l4-ipv6-sctp") \
+ _ (15, RX_CSUM_GENERIC, "rx-csum-generic") \
+ _ (16, TX_CSUM_L3_SINGLE_TUNNEL, "tx-csum-l3-single-tunnel") \
+ _ (17, TX_CSUM_L3_DOUBLE_TUNNEL, "tx-csum-l3-double-tunnel") \
+ _ (18, RX_CSUM_L3_SINGLE_TUNNEL, "rx-csum-l3-single-tunnel") \
+ _ (19, RX_CSUM_L3_DOUBLE_TUNNEL, "rx-csum-l3-double-tunnel") \
+ _ (20, TX_CSUM_L4_SINGLE_TUNNEL, "tx-csum-l4-single-tunnel") \
+ _ (21, TX_CSUM_L4_DOUBLE_TUNNEL, "tx-csum-l4-double-tunnel") \
+ _ (22, RX_CSUM_L4_SINGLE_TUNNEL, "rx-csum-l4-single-tunnel") \
+ _ (23, RX_CSUM_L4_DOUBLE_TUNNEL, "rx-csum-l4-double-tunnel")
+
+typedef enum
+{
+#define _(a, b, c) VIRTCHNL2_CAP_##b = (1 << a),
+ foreach_idpf_checksum_cap_flag
+#undef _
+} idpf_checksum_cap_flag_t;
+
+#define foreach_idpf_seg_cap_flag \
+ _ (0, IPV4_TCP, "ipv4-tcp") \
+ _ (1, IPV4_UDP, "ipv4-udp") \
+ _ (2, IPV4_SCTP, "ipv4-sctp") \
+ _ (3, IPV6_TCP, "ipv6-tcp") \
+ _ (4, IPV6_UDP, "ipv6-udp") \
+ _ (5, IPV6_SCTP, "ipv6-sctp") \
+ _ (6, GENERIC, "generic") \
+ _ (7, TX_SINGLE_TUNNEL, "tx-single-tunnel") \
+ _ (8, TX_DOUBLE_TUNNEL, "tx-double-tunnel")
+
+typedef enum
+{
+#define _(a, b, c) VIRTCHNL2_CAP_SEG_##b = (1 << a),
+ foreach_idpf_seg_cap_flag
+#undef _
+} idpf_seg_cap_flag_t;
+
+#define foreach_idpf_rss_cap_flag \
+ _ (0, IPV4_TCP, "ipv4-tcp") \
+ _ (1, IPV4_UDP, "ipv4-udp") \
+ _ (2, IPV4_SCTP, "ipv4-sctp") \
+ _ (3, IPV4_OTHER, "ipv4-other") \
+ _ (4, IPV6_TCP, "ipv6-tcp") \
+ _ (5, IPV6_UDP, "ipv6-udp") \
+ _ (6, IPV6_SCTP, "ipv6-sctp") \
+ _ (7, IPV6_OTHER, "ipv6-other") \
+ _ (8, IPV4_AH, "ipv4-ah") \
+ _ (9, IPV4_ESP, "ipv4-esp") \
+ _ (10, IPV4_AH_ESP, "ipv4-ah-esp") \
+ _ (11, IPV6_AH, "ipv6-ah") \
+ _ (12, IPV6_ESP, "ipv6-esp") \
+ _ (13, IPV6_AH_ESP, "ipv6-ah-esp")
+
+typedef enum
+{
+#define _(a, b, c) VIRTCHNL2_CAP_RSS_##b = (1 << a),
+ foreach_idpf_rss_cap_flag
+#undef _
+} idpf_rss_cap_flag_t;
+
+#define foreach_idpf_hsplit_cap_flag \
+ _ (0, AT_L2, "at-l2") \
+ _ (1, AT_L3, "at-l3") \
+ _ (2, AT_L4V4, "at-l4v4") \
+ _ (3, AT_L4V6, "at-l4v6")
+
+typedef enum
+{
+#define _(a, b, c) VIRTCHNL2_CAP_RX_HSPLIT_##b = (1 << a),
+ foreach_idpf_hsplit_cap_flag
+#undef _
+} idpf_hsplit_cap_flag_t;
+
+#define foreach_idpf_rsc_cap_flag \
+ _ (0, IPV4_TCP, "ipv4-tcp") \
+ _ (1, IPV4_SCTP, "ipv4-sctp") \
+ _ (2, IPV6_TCP, "ipv6-tcp") \
+ _ (3, IPV6_SCTP, "ipv6-sctp")
+
+typedef enum
+{
+#define _(a, b, c) VIRTCHNL2_CAP_RSC_##b = (1 << a),
+ foreach_idpf_rsc_cap_flag
+#undef _
+} idpf_rsc_cap_flag_t;
+
+#define foreach_idpf_other_cap_flag \
+ _ (0, RDMA, "rdma") \
+ _ (1, SRIOV, "sriov") \
+ _ (2, MACFILTER, "macfilter") \
+ _ (3, FLOW_DIRECTOR, "flow-director") \
+  _ (4, SPLITQ_QSCHED, "splitq-qsched") \
+ _ (5, CRC, "crc") \
+ _ (6, ADQ, "adq") \
+ _ (7, WB_ON_ITR, "wb-on-itr") \
+ _ (8, PROMISC, "promisc") \
+ _ (9, LINK_SPEED, "link-speed") \
+ _ (10, INLINE_IPSEC, "inline-ipsec") \
+ _ (11, LARGE_NUM_QUEUES, "large-num-queues") \
+ _ (12, VLAN, "vlan") \
+ _ (13, PTP, "ptp") \
+ _ (15, ADV_RSS, "adv-rss") \
+ _ (16, FDIR, "fdir") \
+ _ (17, RX_FLEX_DESC, "rx-flex-desc") \
+ _ (18, PTYPE, "ptype") \
+ _ (19, LOOPBACK, "loopback") \
+ _ (20, OEM, "oem")
+
+typedef enum
+{
+#define _(a, b, c) VIRTCHNL2_CAP_##b = (1 << a),
+ foreach_idpf_other_cap_flag
+#undef _
+} idpf_other_cap_flag_t;
+
+#define VIRTCHNL2_TXQ_SCHED_MODE_QUEUE 0
+#define VIRTCHNL2_TXQ_SCHED_MODE_FLOW 1
+
+#define VIRTCHNL2_TXQ_ENABLE_MISS_COMPL BIT (0)
+
+#define VIRTCHNL2_RDMA_CPF 0
+#define VIRTCHNL2_NVME_CPF 1
+#define VIRTCHNL2_ATE_CPF 2
+#define VIRTCHNL2_LCE_CPF 3
+
+#define VIRTCHNL2_RXQ_RSC BIT (0)
+#define VIRTCHNL2_RXQ_HDR_SPLIT BIT (1)
+#define VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK BIT (2)
+#define VIRTCHNL2_RX_DESC_SIZE_16BYTE BIT (3)
+#define VIRTCHNL2_RX_DESC_SIZE_32BYTE BIT (4)
+
+#define foreach_virtchnl2_rss_alg \
+ _ (0, TOEPLITZ_ASYMMETRIC) \
+ _ (1, R_ASYMMETRIC) \
+ _ (2, TOEPLITZ_SYMMETRIC) \
+ _ (3, XOR_SYMMETRIC)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RSS_ALG_##n = v,
+ foreach_virtchnl2_rss_alg
+#undef _
+} virtchnl2_rss_alg_t;
+
+#define foreach_virtchnl2_event \
+ _ (0, UNKNOWN) \
+ _ (1, LINK_CHANGE) \
+ _ (2, START_RESET_ADI) \
+ _ (3, FINISH_RESET_ADI)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_EVENT_##n = v,
+ foreach_virtchnl2_event
+#undef _
+} virtchnl2_event_name_t;
+
+#define foreach_idpf_queue_type \
+ _ (0, TX) \
+ _ (1, RX) \
+ _ (2, TX_COMPLETION) \
+ _ (3, RX_BUFFER) \
+ _ (4, CONFIG_TX) \
+ _ (5, CONFIG_RX) \
+ _ (6, P2P_TX) \
+ _ (7, P2P_RX) \
+ _ (8, P2P_TX_COMPLETION) \
+ _ (9, P2P_RX_BUFFER) \
+ _ (10, MBX_TX) \
+ _ (11, MBX_RX)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_QUEUE_TYPE_##n = v,
+ foreach_idpf_queue_type
+#undef _
+} idpf_queue_type_t;
+
+#define foreach_virtchnl2_itr_idx \
+ _ (0, 0) \
+ _ (1, 1) \
+ _ (2, 2) \
+ _ (3, NO_ITR)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_ITR_IDX_##n = v,
+ foreach_virtchnl2_itr_idx
+#undef _
+} virtchnl2_itr_idx_t;
+
+#define VIRTCHNL2_MAC_ADDR_PRIMARY 1
+#define VIRTCHNL2_MAC_ADDR_EXTRA 2
+
+#define VIRTCHNL2_UNICAST_PROMISC BIT (0)
+#define VIRTCHNL2_MULTICAST_PROMISC BIT (1)
+
+#define foreach_virtchnl2_proto_hdr \
+ _ (0, ANY) \
+ _ (1, PRE_MAC) \
+ _ (2, MAC) \
+ _ (3, POST_MAC) \
+ _ (4, ETHERTYPE) \
+ _ (5, VLAN) \
+ _ (6, SVLAN) \
+ _ (7, CVLAN) \
+ _ (8, MPLS) \
+ _ (9, UMPLS) \
+ _ (10, MMPLS) \
+ _ (11, PTP) \
+ _ (12, CTRL) \
+ _ (13, LLDP) \
+ _ (14, ARP) \
+ _ (15, ECP) \
+ _ (16, EAPOL) \
+ _ (17, PPPOD) \
+ _ (18, PPPOE) \
+ _ (19, IPV4) \
+ _ (20, IPV4_FRAG) \
+ _ (21, IPV6) \
+ _ (22, IPV6_FRAG) \
+ _ (23, IPV6_EH) \
+ _ (24, UDP) \
+ _ (25, TCP) \
+ _ (26, SCTP) \
+ _ (27, ICMP) \
+ _ (28, ICMPV6) \
+ _ (29, IGMP) \
+ _ (30, AH) \
+ _ (31, ESP) \
+ _ (32, IKE) \
+ _ (33, NATT_KEEP) \
+ _ (34, PAY) \
+ _ (35, L2TPV2) \
+ _ (36, L2TPV2_CONTROL) \
+ _ (37, L2TPV3) \
+ _ (38, GTP) \
+ _ (39, GTP_EH) \
+ _ (40, GTPCV2) \
+ _ (41, GTPC_TEID) \
+ _ (42, GTPU) \
+ _ (43, GTPU_UL) \
+ _ (44, GTPU_DL) \
+ _ (45, ECPRI) \
+ _ (46, VRRP) \
+ _ (47, OSPF) \
+ _ (48, TUN) \
+ _ (49, GRE) \
+ _ (50, NVGRE) \
+ _ (51, VXLAN) \
+ _ (52, VXLAN_GPE) \
+ _ (53, GENEVE) \
+ _ (54, NSH) \
+ _ (55, QUIC) \
+ _ (56, PFCP) \
+ _ (57, PFCP_NODE) \
+ _ (58, PFCP_SESSION) \
+ _ (59, RTP) \
+ _ (60, ROCE) \
+ _ (61, ROCEV1) \
+ _ (62, ROCEV2) \
+ _ (65535, NO_PROTO)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_PROTO_HDR_##n = v,
+ foreach_virtchnl2_proto_hdr
+#undef _
+} virtchnl2_proto_hdr_t;
+
+#define VIRTCHNL2_VERSION_MAJOR_2 2
+#define VIRTCHNL2_VERSION_MINOR_0 0
+
+typedef struct
+{
+ u32 major;
+ u32 minor;
+} virtchnl2_version_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_version_info_t, 8);
+
+typedef struct
+{
+ u32 csum_caps;
+ u32 seg_caps;
+ u32 hsplit_caps;
+ u32 rsc_caps;
+ u64 rss_caps;
+ u64 other_caps;
+
+ u32 mailbox_dyn_ctl;
+ u16 mailbox_vector_id;
+ u16 num_allocated_vectors;
+
+ u16 max_rx_q;
+ u16 max_tx_q;
+ u16 max_rx_bufq;
+ u16 max_tx_complq;
+
+ u16 max_sriov_vfs;
+
+ u16 max_vports;
+ u16 default_num_vports;
+
+ u16 max_tx_hdr_size;
+
+ u8 max_sg_bufs_per_tx_pkt;
+
+ u8 itr_idx_map;
+
+ u16 pad1;
+
+ u16 oem_cp_ver_major;
+ u16 oem_cp_ver_minor;
+ u32 device_type;
+
+ u8 reserved[12];
+} virtchnl2_get_capabilities_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_get_capabilities_t, 80);
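/*
 * A short sketch of consuming the capability words above: each *_caps member
 * is a bitmap built from the corresponding foreach_* flag enum. The helper
 * name and the specific flags tested are illustrative only.
 */
static inline int
idpf_caps_sufficient_sketch (virtchnl2_get_capabilities_t *caps)
{
  /* csum_caps: idpf_checksum_cap_flag_t bits */
  int tx_ip4_csum = (caps->csum_caps & VIRTCHNL2_CAP_TX_CSUM_L3_IPV4) != 0;

  /* rss_caps: idpf_rss_cap_flag_t bits */
  int rss_tcp4 = (caps->rss_caps & VIRTCHNL2_CAP_RSS_IPV4_TCP) != 0;

  /* other_caps: idpf_other_cap_flag_t bits */
  int mac_filter = (caps->other_caps & VIRTCHNL2_CAP_MACFILTER) != 0;

  return tx_ip4_csum && rss_tcp4 && mac_filter;
}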
+
+typedef struct
+{
+ /* see VIRTCHNL2_QUEUE_TYPE definitions */
+ u32 type;
+ u32 start_queue_id;
+ u32 num_queues;
+ u32 pad;
+
+ /* Queue tail register offset and spacing provided by CP */
+ u64 qtail_reg_start;
+ u32 qtail_reg_spacing;
+
+ u8 reserved[4];
+} virtchnl2_queue_reg_chunk_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_queue_reg_chunk_t, 32);
+
+/* structure to specify several chunks of contiguous queues */
+typedef struct
+{
+ u16 num_chunks;
+ u8 reserved[6];
+ virtchnl2_queue_reg_chunk_t chunks[1];
+} virtchnl2_queue_reg_chunks_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_queue_reg_chunks_t, 40);
+
+#define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS 6
+
+typedef struct
+{
+ u16 vport_type;
+ u16 txq_model;
+ u16 rxq_model;
+ u16 num_tx_q;
+ u16 num_tx_complq;
+ u16 num_rx_q;
+ u16 num_rx_bufq;
+ u16 default_rx_q;
+ u16 vport_index;
+
+ u16 max_mtu;
+ u32 vport_id;
+ u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
+ u16 pad;
+ u64 rx_desc_ids;
+ u64 tx_desc_ids;
+
+#define MAX_Q_REGIONS 16
+ u32 max_qs_per_qregion[MAX_Q_REGIONS];
+ u32 qregion_total_qs;
+ u16 qregion_type;
+ u16 pad2;
+
+ u32 rss_algorithm;
+ u16 rss_key_size;
+ u16 rss_lut_size;
+
+ u32 rx_split_pos;
+
+ u8 reserved[20];
+ virtchnl2_queue_reg_chunks_t chunks;
+} virtchnl2_create_vport_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_create_vport_t, 192);
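/*
 * A minimal sketch of filling a VIRTCHNL2_OP_CREATE_VPORT request with the
 * structure above. Queue counts are placeholders; sending the request over
 * the mailbox and reading the response (in which the CP returns vport_id,
 * default MAC, RSS sizes and queue register chunks) are not shown.
 */
static void
idpf_fill_create_vport_sketch (virtchnl2_create_vport_t *req)
{
  clib_memset (req, 0, sizeof (*req));

  req->vport_type = VIRTCHNL2_VPORT_TYPE_DEFAULT;
  req->txq_model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
  req->rxq_model = VIRTCHNL2_QUEUE_MODEL_SPLIT;

  /* In the split queue model, completion queues back the TX queues and
   * buffer queues back the RX queues; the exact counts are a driver policy */
  req->num_tx_q = 1;
  req->num_tx_complq = 1;
  req->num_rx_q = 1;
  req->num_rx_bufq = 1;
}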
+
+typedef struct
+{
+ u32 vport_id;
+ u8 reserved[4];
+} virtchnl2_vport_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_vport_t, 8);
+
+typedef struct
+{
+ u64 dma_ring_addr;
+ u32 type;
+ u32 queue_id;
+ u16 relative_queue_id;
+ u16 model;
+ u16 sched_mode;
+ u16 qflags;
+ u16 ring_len;
+
+ u16 tx_compl_queue_id;
+ u16 peer_type;
+ u16 peer_rx_queue_id;
+
+ u16 qregion_id;
+ u8 pad[2];
+
+ u32 egress_pasid;
+ u32 egress_hdr_pasid;
+ u32 egress_buf_pasid;
+
+ u8 reserved[8];
+} virtchnl2_txq_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_txq_info_t, 56);
+
+typedef struct
+{
+ u32 vport_id;
+ u16 num_qinfo;
+
+ u8 reserved[10];
+ virtchnl2_txq_info_t qinfo[1];
+} virtchnl2_config_tx_queues_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_config_tx_queues_t, 72);
+
+/* Receive queue config info */
+typedef struct
+{
+ u64 desc_ids;
+ u64 dma_ring_addr;
+
+ u32 type;
+ u32 queue_id;
+
+ u16 model;
+
+ u16 hdr_buffer_size;
+ u32 data_buffer_size;
+ u32 max_pkt_size;
+
+ u16 ring_len;
+ u8 buffer_notif_stride;
+ u8 pad[1];
+
+ u64 dma_head_wb_addr;
+
+ u16 qflags;
+
+ u16 rx_buffer_low_watermark;
+
+ u16 rx_bufq1_id;
+ u16 rx_bufq2_id;
+ u8 bufq2_ena;
+ u8 pad2;
+
+ u16 qregion_id;
+
+ u32 ingress_pasid;
+ u32 ingress_hdr_pasid;
+ u32 ingress_buf_pasid;
+
+ u8 reserved[16];
+} virtchnl2_rxq_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_rxq_info_t, 88);
+
+typedef struct
+{
+ u32 vport_id;
+ u16 num_qinfo;
+
+ u8 reserved[18];
+ virtchnl2_rxq_info_t qinfo[1];
+} virtchnl2_config_rx_queues_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_config_rx_queues_t, 112);
+
+typedef struct
+{
+ u32 vport_id;
+ u16 num_tx_q;
+ u16 num_tx_complq;
+ u16 num_rx_q;
+ u16 num_rx_bufq;
+ u8 reserved[4];
+ virtchnl2_queue_reg_chunks_t chunks;
+} virtchnl2_add_queues_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_add_queues_t, 56);
+
+typedef struct
+{
+ u16 start_vector_id;
+ u16 start_evv_id;
+ u16 num_vectors;
+ u16 pad1;
+
+ u32 dynctl_reg_start;
+ u32 dynctl_reg_spacing;
+
+ u32 itrn_reg_start;
+ u32 itrn_reg_spacing;
+ u8 reserved[8];
+} virtchnl2_vector_chunk_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_vector_chunk_t, 32);
+
+typedef struct
+{
+ u16 num_vchunks;
+ u8 reserved[14];
+ virtchnl2_vector_chunk_t vchunks[1];
+} virtchnl2_vector_chunks_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_vector_chunks_t, 48);
+
+typedef struct
+{
+ u16 num_vectors;
+ u8 reserved[14];
+ virtchnl2_vector_chunks_t vchunks;
+} virtchnl2_alloc_vectors_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_alloc_vectors_t, 64);
+
+typedef struct
+{
+ u32 vport_id;
+ u16 lut_entries_start;
+ u16 lut_entries;
+ u8 reserved[4];
+ u32 lut[1]; /* RSS lookup table */
+} virtchnl2_rss_lut_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_rss_lut_t, 16);
+
+typedef struct
+{
+ /* Packet Type Groups bitmap */
+ u64 ptype_groups;
+ u32 vport_id;
+ u8 reserved[4];
+} virtchnl2_rss_hash_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_rss_hash_t, 16);
+
+typedef struct
+{
+ u16 num_vfs;
+ u16 pad;
+} virtchnl2_sriov_vfs_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_sriov_vfs_info_t, 4);
+
+typedef struct
+{
+ u32 pasid;
+ u16 mbx_id;
+ u16 mbx_vec_id;
+ u16 adi_id;
+ u8 reserved[64];
+ u8 pad[6];
+ virtchnl2_queue_reg_chunks_t chunks;
+ virtchnl2_vector_chunks_t vchunks;
+} virtchnl2_create_adi_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_create_adi_t, 168);
+
+typedef struct
+{
+ u16 adi_id;
+ u8 reserved[2];
+} virtchnl2_destroy_adi_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_destroy_adi_t, 4);
+
+typedef struct
+{
+ u16 ptype_id_10;
+ u8 ptype_id_8;
+ u8 proto_id_count;
+ u16 pad;
+ u16 proto_id[1];
+} virtchnl2_ptype_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_ptype_t, 8);
+
+typedef struct
+{
+ u16 start_ptype_id;
+ u16 num_ptypes;
+ u32 pad;
+ virtchnl2_ptype_t ptype[1];
+} virtchnl2_get_ptype_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_get_ptype_info_t, 16);
+
+typedef struct
+{
+ u32 vport_id;
+ u8 pad[4];
+
+ u64 rx_bytes;
+ u64 rx_unicast;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_discards;
+ u64 rx_errors;
+ u64 rx_unknown_protocol;
+ u64 tx_bytes;
+ u64 tx_unicast;
+ u64 tx_multicast;
+ u64 tx_broadcast;
+ u64 tx_discards;
+ u64 tx_errors;
+ u64 rx_invalid_frame_length;
+ u64 rx_overflow_drop;
+} virtchnl2_vport_stats_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_vport_stats_t, 128);
+
+typedef struct
+{
+ u32 event;
+ u32 link_speed;
+ u32 vport_id;
+ u8 link_status;
+ u8 pad[1];
+ u16 adi_id;
+} virtchnl2_event_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_event_t, 16);
+
+typedef struct
+{
+ u32 vport_id;
+ u16 key_len;
+ u8 pad;
+ u8 key[1];
+} virtchnl2_rss_key_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_rss_key_t, 8);
+
+typedef struct
+{
+ u32 type;
+ u32 start_queue_id;
+ u32 num_queues;
+ u8 reserved[4];
+} virtchnl2_queue_chunk_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_queue_chunk_t, 16);
+
+typedef struct
+{
+ u16 num_chunks;
+ u8 reserved[6];
+ virtchnl2_queue_chunk_t chunks[1];
+} virtchnl2_queue_chunks_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_queue_chunks_t, 24);
+
+typedef struct
+{
+ u32 vport_id;
+ u8 reserved[4];
+ virtchnl2_queue_chunks_t chunks;
+} virtchnl2_del_ena_dis_queues_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_del_ena_dis_queues_t, 32);
+
+typedef struct
+{
+ u32 queue_id;
+ u16 vector_id;
+ u8 pad[2];
+
+ u32 itr_idx;
+
+ u32 queue_type;
+ u8 reserved[8];
+} virtchnl2_queue_vector_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_queue_vector_t, 24);
+
+typedef struct
+{
+ u32 vport_id;
+ u16 num_qv_maps;
+ u8 pad[10];
+ virtchnl2_queue_vector_t qv_maps[1];
+} virtchnl2_queue_vector_maps_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_queue_vector_maps_t, 40);
+
+typedef struct
+{
+ u32 vport_id;
+ u8 enable;
+ u8 pad[3];
+} virtchnl2_loopback_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_loopback_t, 8);
+
+typedef struct
+{
+ u8 addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
+ u8 type;
+ u8 pad;
+} virtchnl2_mac_addr_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_mac_addr_t, 8);
+
+typedef struct
+{
+ u32 vport_id;
+ u16 num_mac_addr;
+ u8 pad[2];
+ virtchnl2_mac_addr_t mac_addr_list[1];
+} virtchnl2_mac_addr_list_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_mac_addr_list_t, 16);
+
+typedef struct
+{
+ u32 vport_id;
+ u16 flags;
+ u8 pad[2];
+} virtchnl2_promisc_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl2_promisc_info_t, 8);
+
+#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/src/plugins/idpf/virtchnl2_lan_desc.h b/src/plugins/idpf/virtchnl2_lan_desc.h
new file mode 100644
index 00000000000..31eff81fd81
--- /dev/null
+++ b/src/plugins/idpf/virtchnl2_lan_desc.h
@@ -0,0 +1,610 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2023 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef _IDPF_VIRTCHNL_LAN_DESC_H_
+#define _IDPF_VIRTCHNL_LAN_DESC_H_
+
+/* VIRTCHNL2_TX_DESC_IDS
+ * Transmit descriptor ID flags
+ */
+#define foreach_idpf_txdid \
+ _ (0, DATA) \
+ _ (1, CTX) \
+ _ (2, REINJECT_CTX) \
+ _ (3, FLEX_DATA) \
+ _ (4, FLEX_CTX) \
+ _ (5, FLEX_TSO_CTX) \
+ _ (6, FLEX_TSYN_L2TAG1) \
+ _ (7, FLEX_L2TAG1_L2TAG2) \
+ _ (8, FLEX_TSO_L2TAG2_PARSTAG_CTX) \
+ _ (9, FLEX_HOSTSPLIT_SA_TSO_CTX) \
+ _ (10, FLEX_HOSTSPLIT_SA_CTX) \
+ _ (11, FLEX_L2TAG2_CTX) \
+ _ (12, FLEX_FLOW_SCHED) \
+ _ (13, FLEX_HOSTSPLIT_TSO_CTX) \
+ _ (14, FLEX_HOSTSPLIT_CTX) \
+ _ (15, DESC_DONE)
+
+typedef enum
+{
+#define _(a, b) VIRTCHNL2_TXDID_##b = (1 << a),
+ foreach_idpf_txdid
+#undef _
+} idpf_txdid_t;
+
+/* VIRTCHNL2_RX_DESC_IDS
+ * Receive descriptor IDs (range from 0 to 63)
+ */
+#define foreach_virtchnl2_rxdid \
+ _ (0, 0_16B_BASE) \
+ _ (1, 1_32B_BASE) \
+ _ (2, 2_FLEX_SPLITQ) \
+ _ (2, 2_FLEX_SQ_NIC) \
+ _ (3, 3_FLEX_SQ_SW) \
+ _ (4, 4_FLEX_SQ_NIC_VEB) \
+ _ (5, 5_FLEX_SQ_NIC_ACL) \
+ _ (6, 6_FLEX_SQ_NIC_2) \
+ _ (7, 7_HW_RSVD) \
+ _ (16, 16_COMMS_GENERIC) \
+ _ (17, 17_COMMS_AUX_VLAN) \
+ _ (18, 18_COMMS_AUX_IPV4) \
+ _ (19, 19_COMMS_AUX_IPV6) \
+ _ (20, 20_COMMS_AUX_FLOW) \
+ _ (21, 21_COMMS_AUX_TCP)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RXDID_##n = v,
+ foreach_virtchnl2_rxdid
+#undef _
+} virtchnl2_rxdid_t;
+
+/* VIRTCHNL2_RX_DESC_ID_BITMASKS
+ * Receive descriptor ID bitmasks
+ */
+#define VIRTCHNL2_RXDID_0_16B_BASE_M BIT (VIRTCHNL2_RXDID_0_16B_BASE)
+#define VIRTCHNL2_RXDID_1_32B_BASE_M BIT (VIRTCHNL2_RXDID_1_32B_BASE)
+#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M BIT (VIRTCHNL2_RXDID_2_FLEX_SPLITQ)
+#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M BIT (VIRTCHNL2_RXDID_2_FLEX_SQ_NIC)
+#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW_M BIT (VIRTCHNL2_RXDID_3_FLEX_SQ_SW)
+#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB_M \
+ BIT (VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB)
+#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL_M \
+ BIT (VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL)
+#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2_M BIT (VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2)
+#define VIRTCHNL2_RXDID_7_HW_RSVD_M BIT (VIRTCHNL2_RXDID_7_HW_RSVD)
+/* 9 through 15 are reserved */
+#define VIRTCHNL2_RXDID_16_COMMS_GENERIC_M \
+ BIT (VIRTCHNL2_RXDID_16_COMMS_GENERIC)
+#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN_M \
+ BIT (VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN)
+#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4_M \
+ BIT (VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4)
+#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6_M \
+ BIT (VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6)
+#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW_M \
+ BIT (VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW)
+#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP_M \
+ BIT (VIRTCHNL2_RXDID_21_COMMS_AUX_TCP)
+/* 22 through 63 are reserved */
+
+/* Rx */
+/* For splitq virtchnl2_rx_flex_desc_adv desc members */
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S 0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M \
+ MAKEMASK (0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S 0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M \
+ MAKEMASK (0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S 10
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M \
+ MAKEMASK (0x3UL, VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S 12
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M \
+ MAKEMASK (0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S 0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M \
+ MAKEMASK (0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S 14
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M \
+ BIT_ULL (VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S 15
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M \
+ BIT_ULL (VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S 0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M \
+ MAKEMASK (0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S 10
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M \
+ BIT_ULL (VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S 11
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M \
+ BIT_ULL (VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 12
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \
+ BIT_ULL (VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S 13
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M \
+  MAKEMASK (0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S)
+
+#define foreach_virtchnl2_rx_flex_desc_adv_status0_qw1 \
+ _ (0, DD_S) \
+ _ (1, EOF_S) \
+ _ (2, HBO_S) \
+ _ (3, L3L4P_S) \
+ _ (4, XSUM_IPE_S) \
+ _ (5, XSUM_L4E_S) \
+ _ (6, XSUM_EIPE_S) \
+ _ (7, XSUM_EUDPE_S)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_##n = v,
+ foreach_virtchnl2_rx_flex_desc_adv_status0_qw1
+#undef _
+} virtchnl2_rx_flex_desc_adv_status0_qw1_t;
+
+#define foreach_virtchnl2_rx_flex_desc_adv_status0_qw0 \
+ _ (0, LPBK_S) \
+ _ (1, IPV6EXADD_S) \
+ _ (2, RXE_S) \
+ _ (3, CRCP_S) \
+ _ (4, RSS_VALID_S) \
+ _ (5, L2TAG1P_S) \
+ _ (6, XTRMD0_VALID_S) \
+ _ (7, XTRMD1_VALID_S) \
+ _ (8, LAST)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_##n = v,
+ foreach_virtchnl2_rx_flex_desc_adv_status0_qw0
+#undef _
+} virtchnl2_rx_flex_desc_adv_status0_qw0_t;
+
+#define foreach_virtchnl2_rx_flex_desc_adv_status1 \
+ _ (0, RSVD_S) \
+ _ (2, ATRAEFAIL_S) \
+ _ (3, L2TAG2P_S) \
+ _ (4, XTRMD2_VALID_S) \
+ _ (5, XTRMD3_VALID_S) \
+ _ (6, XTRMD4_VALID_S) \
+ _ (7, XTRMD5_VALID_S) \
+ _ (8, LAST)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_##n = v,
+ foreach_virtchnl2_rx_flex_desc_adv_status1
+#undef _
+} virtchnl2_rx_flex_desc_adv_status1_t;
+
+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_S 0
+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M \
+ MAKEMASK (0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_PTYPE_S) /* 10 bits */
+
+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S 0
+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M \
+ MAKEMASK (0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S) /* 14 bits */
+
+#define foreach_virtchnl2_rx_flex_desc_status0 \
+ _ (0, DD_S) \
+ _ (1, EOF_S) \
+ _ (2, HBO_S) \
+ _ (3, L3L4P_S) \
+ _ (4, XSUM_IPE_S) \
+ _ (5, XSUM_L4E_S) \
+ _ (6, XSUM_EIPE_S) \
+ _ (7, XSUM_EUDPE_S) \
+ _ (8, LPBK_S) \
+ _ (9, IPV6EXADD_S) \
+ _ (10, RXE_S) \
+ _ (11, CRCP_S) \
+ _ (12, RSS_VALID_S) \
+ _ (13, L2TAG1P_S) \
+ _ (14, XTRMD0_VALID_S) \
+ _ (15, XTRMD1_VALID_S) \
+ _ (16, LAST)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RX_FLEX_DESC_STATUS0_##n = v,
+ foreach_virtchnl2_rx_flex_desc_status0
+#undef _
+} virtchnl2_rx_flex_desc_status0_t;
+
+#define foreach_virtchnl2_rx_flex_desc_status1 \
+ _ (0, CPM_S) \
+ _ (4, NAT_S) \
+ _ (5, CRYPTO_S) \
+ _ (11, L2TAG2P_S) \
+ _ (12, XTRMD2_VALID_S) \
+ _ (13, XTRMD3_VALID_S) \
+ _ (14, XTRMD4_VALID_S) \
+ _ (15, XTRMD5_VALID_S) \
+ _ (16, LAST)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RX_FLEX_DESC_STATUS1_##n = v,
+ foreach_virtchnl2_rx_flex_desc_status1
+#undef _
+} virtchnl2_rx_flex_desc_status1_t;
+
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S 63
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_M \
+ BIT_ULL (VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S 52
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_M \
+ MAKEMASK (0x7FFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S 38
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M \
+ MAKEMASK (0x3FFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S 30
+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M \
+ MAKEMASK (0xFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S 19
+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M \
+ MAKEMASK (0xFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S 0
+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M \
+ MAKEMASK (0x7FFFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S)
+
+#define foreach_virtchnl2_rx_base_desc_status \
+ _ (0, DD_S) \
+ _ (1, EOF_S) \
+ _ (2, L2TAG1P_S) \
+ _ (3, L3L4P_S) \
+ _ (4, CRCP_S) \
+ _ (5, RSVD_S) \
+ _ (8, EXT_UDP_0_S) \
+ _ (9, UMBCAST_S) \
+ _ (11, FLM_S) \
+ _ (12, FLTSTAT_S) \
+ _ (14, LPBK_S) \
+ _ (15, IPV6EXADD_S) \
+ _ (16, RSVD1_S) \
+ _ (18, INT_UDP_0_S) \
+ _ (19, LAST)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RX_BASE_DESC_STATUS_##n = v,
+ foreach_virtchnl2_rx_base_desc_status
+#undef _
+} virtchnl2_rx_base_desc_status_t;
+
+#define VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S 0
+
+#define foreach_virtchnl2_rx_base_desc_error \
+ _ (0, RXE_S) \
+ _ (1, ATRAEFAIL_S) \
+ _ (2, HBO_S) \
+ _ (3, L3L4E_S) \
+ _ (3, IPE_S) \
+ _ (4, L4E_S) \
+ _ (5, EIPE_S) \
+ _ (6, OVERSIZE_S) \
+ _ (7, PPRS_S)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RX_BASE_DESC_ERROR_##n = v,
+ foreach_virtchnl2_rx_base_desc_error
+#undef _
+} virtchnl2_rx_base_desc_error_t;
+
+#define foreach_virtchnl2_rx_base_desc_fltstat \
+ _ (0, NO_DATA) \
+ _ (1, FD_ID) \
+ _ (2, RSV) \
+ _ (3, RSS_HASH)
+
+typedef enum
+{
+#define _(v, n) VIRTCHNL2_RX_BASE_DESC_FLTSTAT_##n = v,
+ foreach_virtchnl2_rx_base_desc_fltstat
+#undef _
+} virtchnl2_rx_base_desc_fltstat_t;
+
+/* Receive Descriptors */
+/* splitq buf
+ | 16| 0|
+ ----------------------------------------------------------------
+ | RSV | Buffer ID |
+ ----------------------------------------------------------------
+ | Rx packet buffer address |
+ ----------------------------------------------------------------
+ | Rx header buffer address |
+ ----------------------------------------------------------------
+ | RSV |
+ ----------------------------------------------------------------
+ | 0|
+ */
+typedef struct
+{
+ struct
+ {
+ u16 buf_id;
+ u16 rsvd0;
+ u32 rsvd1;
+ } qword0;
+ u64 pkt_addr;
+ u64 hdr_addr;
+ u64 rsvd2;
+} virtchnl2_splitq_rx_buf_desc_t;
+
+typedef struct
+{
+ u64 pkt_addr;
+ u64 hdr_addr;
+ u64 rsvd1;
+ u64 rsvd2;
+} virtchnl2_singleq_rx_buf_desc_t;
+
+union virtchnl2_rx_buf_desc
+{
+ virtchnl2_singleq_rx_buf_desc_t read;
+ virtchnl2_splitq_rx_buf_desc_t split_rd;
+};
+
+typedef struct
+{
+ struct
+ {
+ struct
+ {
+ u16 mirroring_status;
+ u16 l2tag1;
+ } lo_dword;
+ union
+ {
+ u32 rss;
+ u32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct
+ {
+ u64 status_error_ptype_len;
+ } qword1;
+ struct
+ {
+ u16 ext_status;
+ u16 rsvd;
+ u16 l2tag2_1;
+ u16 l2tag2_2;
+ } qword2;
+ struct
+ {
+ u32 reserved;
+ u32 fd_id;
+ } qword3;
+} virtchnl2_singleq_base_rx_desc_t;
+
+typedef struct
+{
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ u16 ptype_flex_flags0;
+ u16 pkt_len;
+ u16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ u16 status_error0;
+ u16 l2tag1;
+ u16 flex_meta0;
+ u16 flex_meta1;
+
+ /* Qword 2 */
+ u16 status_error1;
+ u8 flex_flags2;
+ u8 time_stamp_low;
+ u16 l2tag2_1st;
+ u16 l2tag2_2nd;
+
+ /* Qword 3 */
+ u16 flex_meta2;
+ u16 flex_meta3;
+ union
+ {
+ struct
+ {
+ u16 flex_meta4;
+ u16 flex_meta5;
+ } flex;
+ u32 ts_high;
+ } flex_ts;
+} virtchnl2_rx_flex_desc_t;
+
+typedef struct
+{
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ u16 ptype_flex_flags0;
+ u16 pkt_len;
+ u16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ u16 status_error0;
+ u16 l2tag1;
+ u32 rss_hash;
+
+ /* Qword 2 */
+ u16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ u16 l2tag2_1st;
+ u16 l2tag2_2nd;
+
+ /* Qword 3 */
+ u32 flow_id;
+ union
+ {
+ struct
+ {
+ u16 rsvd;
+ u16 flow_id_ipv6;
+ } flex;
+ u32 ts_high;
+ } flex_ts;
+} virtchnl2_rx_flex_desc_nic_t;
+
+typedef struct
+{
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ u16 ptype_flex_flags0;
+ u16 pkt_len;
+ u16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ u16 status_error0;
+ u16 l2tag1;
+ u16 src_vsi;
+ u16 flex_md1_rsvd;
+
+ /* Qword 2 */
+ u16 status_error1;
+ u8 flex_flags2;
+ u8 ts_low;
+ u16 l2tag2_1st;
+ u16 l2tag2_2nd;
+
+ /* Qword 3 */
+ u32 rsvd;
+ u32 ts_high;
+} virtchnl2_rx_flex_desc_sw_t;
+
+typedef struct
+{
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ u16 ptype_flex_flags0;
+ u16 pkt_len;
+ u16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ u16 status_error0;
+ u16 l2tag1;
+ u32 rss_hash;
+
+ /* Qword 2 */
+ u16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ u16 l2tag2_1st;
+ u16 l2tag2_2nd;
+
+ /* Qword 3 */
+ u16 flow_id;
+ u16 src_vsi;
+ union
+ {
+ struct
+ {
+ u16 rsvd;
+ u16 flow_id_ipv6;
+ } flex;
+ u32 ts_high;
+ } flex_ts;
+} virtchnl2_rx_flex_desc_nic_2_t;
+
+typedef struct
+{
+ /* Qword 0 */
+ u8 rxdid_ucast;
+ u8 status_err0_qw0;
+ u16 ptype_err_fflags0;
+ u16 pktlen_gen_bufq_id;
+ u16 hdrlen_flags;
+
+ /* Qword 1 */
+ u8 status_err0_qw1;
+ u8 status_err1;
+ u8 fflags1;
+ u8 ts_low;
+ u16 fmd0;
+ u16 fmd1;
+ /* Qword 2 */
+ u16 fmd2;
+ u8 fflags2;
+ u8 hash3;
+ u16 fmd3;
+ u16 fmd4;
+ /* Qword 3 */
+ u16 fmd5;
+ u16 fmd6;
+ u16 fmd7_0;
+ u16 fmd7_1;
+} virtchnl2_rx_flex_desc_adv_t;
+
+typedef struct
+{
+ /* Qword 0 */
+ u8 rxdid_ucast;
+ u8 status_err0_qw0;
+ u16 ptype_err_fflags0;
+ u16 pktlen_gen_bufq_id;
+ u16 hdrlen_flags;
+
+ /* Qword 1 */
+ u8 status_err0_qw1;
+ u8 status_err1;
+ u8 fflags1;
+ u8 ts_low;
+ u16 buf_id;
+ union
+ {
+ u16 raw_cs;
+ u16 l2tag1;
+ u16 rscseglen;
+ } misc;
+ /* Qword 2 */
+ u16 hash1;
+ union
+ {
+ u8 fflags2;
+ u8 mirrorid;
+ u8 hash2;
+ } ff2_mirrid_hash2;
+ u8 hash3;
+ u16 l2tag2;
+ u16 fmd4;
+ /* Qword 3 */
+ u16 l2tag1;
+ u16 fmd6;
+ u32 ts_high;
+} virtchnl2_rx_flex_desc_adv_nic_3_t;
+
+typedef union
+{
+ virtchnl2_singleq_rx_buf_desc_t read;
+ virtchnl2_singleq_base_rx_desc_t base_wb;
+ virtchnl2_rx_flex_desc_t flex_wb;
+ virtchnl2_rx_flex_desc_nic_t flex_nic_wb;
+ virtchnl2_rx_flex_desc_sw_t flex_sw_wb;
+ virtchnl2_rx_flex_desc_nic_2_t flex_nic_2_wb;
+ virtchnl2_rx_flex_desc_adv_t flex_adv_wb;
+ virtchnl2_rx_flex_desc_adv_nic_3_t flex_adv_nic_3_wb;
+ u64 qword[4];
+} virtchnl2_rx_desc_t;
+
+#endif /* _IDPF_VIRTCHNL_LAN_DESC_H_ */
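/*
 * A short sketch of parsing a split-queue advanced RX descriptor
 * (virtchnl2_rx_flex_desc_adv_nic_3_t) with the masks defined above. The
 * generation-bit bookkeeping (cur_gen) belongs to the caller, and
 * MAKEMASK/BIT_ULL are assumed to come from the driver's common headers.
 */
static inline int
idpf_splitq_desc_done_sketch (volatile virtchnl2_rx_flex_desc_adv_nic_3_t *d,
			      u16 cur_gen, u16 *len, u16 *buf_id)
{
  u16 pktlen_gen = d->pktlen_gen_bufq_id;

  /* The GEN bit toggles each time software wraps the completion ring; a
   * mismatch with cur_gen means hardware has not written this entry yet */
  if (((pktlen_gen & VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>
       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) != cur_gen)
    return 0;

  *len = pktlen_gen & VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M;
  *buf_id = d->buf_id; /* id of the posted buffer this completion refers to */
  return 1;
}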