Diffstat (limited to 'drivers/raw')
-rw-r--r--  drivers/raw/Makefile | 5
-rw-r--r--  drivers/raw/dpaa2_cmdif/Makefile | 36
-rw-r--r--  drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c | 297
-rw-r--r--  drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h | 46
-rw-r--r--  drivers/raw/dpaa2_cmdif/meson.build | 10
-rw-r--r--  drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h | 35
-rw-r--r--  drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map | 4
-rw-r--r--  drivers/raw/dpaa2_qdma/Makefile | 37
-rw-r--r--  drivers/raw/dpaa2_qdma/dpaa2_qdma.c | 1001
-rw-r--r--  drivers/raw/dpaa2_qdma/dpaa2_qdma.h | 150
-rw-r--r--  drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h | 46
-rw-r--r--  drivers/raw/dpaa2_qdma/meson.build | 10
-rw-r--r--  drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h | 286
-rw-r--r--  drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map | 20
-rw-r--r--  drivers/raw/ifpga_rawdev/Makefile | 36
-rw-r--r--  drivers/raw/ifpga_rawdev/base/Makefile | 26
-rw-r--r--  drivers/raw/ifpga_rawdev/base/README | 31
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_api.c | 294
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_api.h | 28
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_compat.h | 58
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_defines.h | 1663
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c | 821
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h | 11
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c | 253
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h | 168
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_fme.c | 734
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c | 301
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c | 381
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c | 715
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c | 352
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_hw.h | 127
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_port.c | 388
-rw-r--r--  drivers/raw/ifpga_rawdev/base/ifpga_port_error.c | 144
-rw-r--r--  drivers/raw/ifpga_rawdev/base/meson.build | 34
-rw-r--r--  drivers/raw/ifpga_rawdev/base/opae_debug.c | 99
-rw-r--r--  drivers/raw/ifpga_rawdev/base/opae_debug.h | 19
-rw-r--r--  drivers/raw/ifpga_rawdev/base/opae_hw_api.c | 381
-rw-r--r--  drivers/raw/ifpga_rawdev/base/opae_hw_api.h | 253
-rw-r--r--  drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c | 145
-rw-r--r--  drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h | 279
-rw-r--r--  drivers/raw/ifpga_rawdev/base/opae_osdep.h | 79
-rw-r--r--  drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h | 75
-rw-r--r--  drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h | 45
-rw-r--r--  drivers/raw/ifpga_rawdev/ifpga_rawdev.c | 617
-rw-r--r--  drivers/raw/ifpga_rawdev/ifpga_rawdev.h | 37
-rw-r--r--  drivers/raw/ifpga_rawdev/meson.build | 15
-rw-r--r--  drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map | 4
-rw-r--r--  drivers/raw/meson.build | 7
-rw-r--r--  drivers/raw/skeleton_rawdev/Makefile | 1
-rw-r--r--  drivers/raw/skeleton_rawdev/meson.build | 6
-rw-r--r--  drivers/raw/skeleton_rawdev/skeleton_rawdev.c | 18
-rw-r--r--  drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c | 16
52 files changed, 10637 insertions, 7 deletions
diff --git a/drivers/raw/Makefile b/drivers/raw/Makefile
index da7c8b44..8e29b4a5 100644
--- a/drivers/raw/Makefile
+++ b/drivers/raw/Makefile
@@ -5,5 +5,10 @@ include $(RTE_SDK)/mk/rte.vars.mk
# DIRS-$(<configuration>) += <directory>
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/raw/dpaa2_cmdif/Makefile b/drivers/raw/dpaa2_cmdif/Makefile
new file mode 100644
index 00000000..9b863dda
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_cmdif.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+
+EXPORT_MAP := rte_pmd_dpaa2_cmdif_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV)-include += rte_pmd_dpaa2_cmdif.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
new file mode 100644
index 00000000..469960a3
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_atomic.h>
+#include <rte_interrupts.h>
+#include <rte_branch_prediction.h>
+#include <rte_lcore.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_cmdif_logs.h"
+#include "rte_pmd_dpaa2_cmdif.h"
+
+/* Dynamic log type identifier */
+int dpaa2_cmdif_logtype;
+
+/* CMDIF driver name */
+#define DPAA2_CMDIF_PMD_NAME dpaa2_dpci
+
+/* CMDIF driver object */
+static struct rte_vdev_driver dpaa2_cmdif_drv;
+
+/*
+ * This API provides the DPCI device ID in 'attr_value'.
+ * The device ID shall be passed by GPP to the AIOP using CMDIF commands.
+ */
+static int
+dpaa2_cmdif_get_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ uint64_t *attr_value)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(attr_name);
+
+ if (!attr_value) {
+ DPAA2_CMDIF_ERR("Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+ *attr_value = cidev->dpci_id;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_send_cnxt;
+ struct dpaa2_queue *txq;
+ struct qbman_fd fd;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_send_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ txq = &(cidev->tx_queue[cmdif_send_cnxt->priority]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ /* Set some of the FD parameters to 0.
+ * For performance reasons do not memset.
+ */
+ fd.simple.bpid_offset = 0;
+ fd.simple.ctrl = 0;
+
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));
+ DPAA2_SET_FD_LEN(&fd, cmdif_send_cnxt->size);
+ DPAA2_SET_FD_FRC(&fd, cmdif_send_cnxt->frc);
+ DPAA2_SET_FD_FLC(&fd, cmdif_send_cnxt->flc);
+
+ /* Enqueue a packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret);
+ } while (ret == -EBUSY);
+
+ DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_rcv_cnxt;
+ struct dpaa2_queue *rxq;
+ struct qbman_swp *swp;
+ struct qbman_result *dq_storage;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+ uint8_t status;
+ int ret;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ /* Check if previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until the dq_storage is updated with new token by QBMAN */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
+ return 0;
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd) + DPAA2_GET_FD_OFFSET(fd));
+ cmdif_rcv_cnxt->size = DPAA2_GET_FD_LEN(fd);
+ cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
+ cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);
+
+ DPAA2_CMDIF_DP_DEBUG("packet received\n");
+
+ return 1;
+}
+
+static const struct rte_rawdev_ops dpaa2_cmdif_ops = {
+ .attr_get = dpaa2_cmdif_get_attr,
+ .enqueue_bufs = dpaa2_cmdif_enqueue_bufs,
+ .dequeue_bufs = dpaa2_cmdif_dequeue_bufs,
+};
+
+static int
+dpaa2_cmdif_create(const char *name,
+ struct rte_vdev_device *vdev,
+ int socket_id)
+{
+ struct rte_rawdev *rawdev;
+ struct dpaa2_dpci_dev *cidev;
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpaa2_dpci_dev),
+ socket_id);
+ if (!rawdev) {
+ DPAA2_CMDIF_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ rawdev->dev_ops = &dpaa2_cmdif_ops;
+ rawdev->device = &vdev->device;
+ rawdev->driver_name = vdev->device.driver->name;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ cidev = rte_dpaa2_alloc_dpci_dev();
+ if (!cidev) {
+ DPAA2_CMDIF_ERR("Unable to allocate CI device");
+ rte_rawdev_pmd_release(rawdev);
+ return -ENODEV;
+ }
+
+ rawdev->dev_private = cidev;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ DPAA2_CMDIF_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ /* Only the primary process frees the DPCI device */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_dpaa2_free_dpci_dev(rdev->dev_private);
+
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ DPAA2_CMDIF_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret = 0;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_create(name, vdev, rte_socket_id());
+
+ return ret;
+}
+
+static int
+dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_destroy(name);
+
+ return ret;
+}
+
+static struct rte_vdev_driver dpaa2_cmdif_drv = {
+ .probe = dpaa2_cmdif_probe,
+ .remove = dpaa2_cmdif_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
+
+RTE_INIT(dpaa2_cmdif_init_log)
+{
+ dpaa2_cmdif_logtype = rte_log_register("pmd.raw.dpaa2.cmdif");
+ if (dpaa2_cmdif_logtype >= 0)
+ rte_log_set_level(dpaa2_cmdif_logtype, RTE_LOG_INFO);
+}
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
new file mode 100644
index 00000000..8991e832
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_CMDIF_LOGS_H__
+#define __DPAA2_CMDIF_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_cmdif_logtype;
+
+#define DPAA2_CMDIF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_cmdif_logtype, "dpaa2_cmdif: " \
+ fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_cmdif_logtype, "dpaa2_cmdif: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_CMDIF_FUNC_TRACE() DPAA2_CMDIF_DEBUG(">>")
+
+#define DPAA2_CMDIF_INFO(fmt, args...) \
+ DPAA2_CMDIF_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_ERR(fmt, args...) \
+ DPAA2_CMDIF_LOG(ERR, fmt, ## args)
+#define DPAA2_CMDIF_WARN(fmt, args...) \
+ DPAA2_CMDIF_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_CMDIF_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_cmdif: " fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DP_DEBUG(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_CMDIF_DP_INFO(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_DP_WARN(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_CMDIF_LOGS_H__ */
diff --git a/drivers/raw/dpaa2_cmdif/meson.build b/drivers/raw/dpaa2_cmdif/meson.build
new file mode 100644
index 00000000..1d146872
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
+deps += ['rawdev', 'mempool_dpaa2', 'bus_vdev']
+sources = files('dpaa2_cmdif.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2_cmdif.h')
diff --git a/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
new file mode 100644
index 00000000..483b66ea
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_CMDIF_H__
+#define __RTE_PMD_DPAA2_CMDIF_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 AIOP CMDIF PMD specific structures.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** The context required in the I/O path for DPAA2 AIOP Command Interface */
+struct rte_dpaa2_cmdif_context {
+ /** Size to populate in QBMAN FD */
+ uint32_t size;
+ /** FRC to populate in QBMAN FD */
+ uint32_t frc;
+ /** FLC to populate in QBMAN FD */
+ uint64_t flc;
+ /** Priority of the command. This priority determines the DPCI Queue */
+ uint8_t priority;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_PMD_DPAA2_CMDIF_H__ */
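
As a usage sketch (outside the scope of this patch), an application passes this context through the generic rawdev API when talking to the AIOP; the device name lookup, the attribute name string and the omitted error handling below are assumptions for illustration, only the context structure itself is defined by this driver:

    #include <rte_rawdev.h>
    #include <rte_pmd_dpaa2_cmdif.h>

    static int
    send_aiop_cmd(const char *rawdev_name, void *cmd_buf, uint32_t cmd_size)
    {
            struct rte_dpaa2_cmdif_context ctx = { 0 };
            struct rte_rawdev_buf buf = { .buf_addr = cmd_buf };
            struct rte_rawdev_buf *bufs[1] = { &buf };
            uint64_t dpci_id;
            uint16_t dev_id;

            dev_id = rte_rawdev_get_dev_id(rawdev_name);

            /* The PMD's attr_get ignores the attribute name and returns the
             * DPCI object ID, which the GPP passes on to the AIOP.
             */
            rte_rawdev_get_attr(dev_id, "dpci_dev_id", &dpci_id);

            ctx.size = cmd_size;   /* becomes FD[LEN] */
            ctx.frc = 0;           /* becomes FD[FRC] */
            ctx.flc = 0;           /* becomes FD[FLC] */
            ctx.priority = 0;      /* selects the DPCI Tx queue */

            /* One buffer per call; the driver reads only buffers[0] */
            return rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &ctx);
    }

Responses from the AIOP are fetched the same way with rte_rawdev_dequeue_buffers(), which fills the context's size/frc/flc fields from the received frame descriptor.
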
diff --git a/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/raw/dpaa2_qdma/Makefile b/drivers/raw/dpaa2_qdma/Makefile
new file mode 100644
index 00000000..d88809ea
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_qdma.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_mempool
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_ring
+
+EXPORT_MAP := rte_pmd_dpaa2_qdma_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV)-include += rte_pmd_dpaa2_qdma.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
new file mode 100644
index 00000000..2787d302
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -0,0 +1,1001 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_atomic.h>
+#include <rte_lcore.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+#include <mc/fsl_dpdmai.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+
+#include "dpaa2_qdma.h"
+#include "dpaa2_qdma_logs.h"
+#include "rte_pmd_dpaa2_qdma.h"
+
+/* Dynamic log type identifier */
+int dpaa2_qdma_logtype;
+
+/* QDMA device */
+static struct qdma_device qdma_dev;
+
+/* QDMA H/W queues list */
+TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
+static struct qdma_hw_queue_list qdma_queue_list
+ = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
+
+/* QDMA Virtual Queues */
+struct qdma_virt_queue *qdma_vqs;
+
+/* QDMA per core data */
+struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
+
+static struct qdma_hw_queue *
+alloc_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_hw_queue *queue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Get a free queue from the list */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next) {
+ if (queue->num_users == 0) {
+ queue->lcore_id = lcore_id;
+ queue->num_users++;
+ break;
+ }
+ }
+
+ return queue;
+}
+
+static void
+free_hw_queue(struct qdma_hw_queue *queue)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ queue->num_users--;
+}
+
+
+static struct qdma_hw_queue *
+get_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_per_core_info *core_info;
+ struct qdma_hw_queue *queue, *temp;
+ uint32_t least_num_users;
+ int num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+
+ /*
+ * Allocate a HW queue if there are less queues
+ * than maximum per core queues configured
+ */
+ if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
+ queue = alloc_hw_queue(lcore_id);
+ if (queue) {
+ core_info->hw_queues[num_hw_queues] = queue;
+ core_info->num_hw_queues++;
+ return queue;
+ }
+ }
+
+ queue = core_info->hw_queues[0];
+ /* In case there is no queue associated with the core return NULL */
+ if (!queue)
+ return NULL;
+
+ /* Fetch the least loaded H/W queue */
+ least_num_users = core_info->hw_queues[0]->num_users;
+ for (i = 0; i < num_hw_queues; i++) {
+ temp = core_info->hw_queues[i];
+ if (temp->num_users < least_num_users)
+ queue = temp;
+ }
+
+ if (queue)
+ queue->num_users++;
+
+ return queue;
+}
+
+static void
+put_hw_queue(struct qdma_hw_queue *queue)
+{
+ struct qdma_per_core_info *core_info;
+ int lcore_id, num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /*
+ * If this is the last user of the queue free it.
+ * Also remove it from QDMA core info.
+ */
+ if (queue->num_users == 1) {
+ free_hw_queue(queue);
+
+ /* Remove the physical queue from core info */
+ lcore_id = queue->lcore_id;
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+ for (i = 0; i < num_hw_queues; i++) {
+ if (queue == core_info->hw_queues[i])
+ break;
+ }
+ for (; i < num_hw_queues - 1; i++)
+ core_info->hw_queues[i] = core_info->hw_queues[i + 1];
+ core_info->hw_queues[i] = NULL;
+ } else {
+ queue->num_users--;
+ }
+}
+
+int __rte_experimental
+rte_qdma_init(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_init(&qdma_dev.lock);
+
+ return 0;
+}
+
+void __rte_experimental
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
+}
+
+int __rte_experimental
+rte_qdma_reset(void)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before reset.");
+ return -EBUSY;
+ }
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
+ qdma_vqs[i].num_dequeues)) {
+ DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+ return -EBUSY;
+ }
+ }
+
+ /* Reset HW queues */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next)
+ queue->num_users = 0;
+
+ /* Reset and free virtual queues */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ }
+ if (qdma_vqs)
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+
+ /* Reset per core info */
+ memset(&qdma_core_info, 0,
+ sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
+
+ /* Free the FLE pool */
+ if (qdma_dev.fle_pool)
+ rte_mempool_free(qdma_dev.fle_pool);
+
+ /* Reset QDMA device structure */
+ qdma_dev.mode = RTE_QDMA_MODE_HW;
+ qdma_dev.max_hw_queues_per_core = 0;
+ qdma_dev.fle_pool = NULL;
+ qdma_dev.fle_pool_count = 0;
+ qdma_dev.max_vqs = 0;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_configure(struct rte_qdma_config *qdma_config)
+{
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before config.");
+ return -EBUSY;
+ }
+
+ /* Reset the QDMA device */
+ ret = rte_qdma_reset();
+ if (ret) {
+ DPAA2_QDMA_ERR("Resetting QDMA failed");
+ return ret;
+ }
+
+ /* Set mode */
+ qdma_dev.mode = qdma_config->mode;
+
+ /* Set max HW queue per core */
+ if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
+ DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
+ MAX_HW_QUEUE_PER_CORE);
+ return -EINVAL;
+ }
+ qdma_dev.max_hw_queues_per_core =
+ qdma_config->max_hw_queues_per_core;
+
+ /* Allocate Virtual Queues */
+ qdma_vqs = rte_malloc("qdma_virtual_queues",
+ (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
+ RTE_CACHE_LINE_SIZE);
+ if (!qdma_vqs) {
+ DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+ return -ENOMEM;
+ }
+ qdma_dev.max_vqs = qdma_config->max_vqs;
+
+ /* Allocate FLE pool */
+ qdma_dev.fle_pool = rte_mempool_create("qdma_fle_pool",
+ qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
+ QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (!qdma_dev.fle_pool) {
+ DPAA2_QDMA_ERR("qdma_fle_pool create failed");
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+ return -ENOMEM;
+ }
+ qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_start(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 1;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
+{
+ char ring_name[32];
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ /* Get a free Virtual Queue */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].in_use == 0)
+ break;
+ }
+
+ /* Return in case no VQ is free */
+ if (i == qdma_dev.max_vqs) {
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
+ (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
+ /* Allocate HW queue for a VQ */
+ qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 1;
+ } else {
+ /* Allocate a Ring for Virtual Queue in VQ mode */
+ sprintf(ring_name, "status ring %d", i);
+ qdma_vqs[i].status_ring = rte_ring_create(ring_name,
+ qdma_dev.fle_pool_count, rte_socket_id(), 0);
+ if (!qdma_vqs[i].status_ring) {
+ DPAA2_QDMA_ERR("Status ring creation failed for vq");
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return rte_errno;
+ }
+
+ /* Get a HW queue (shared) for a VQ */
+ qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 0;
+ }
+
+ if (qdma_vqs[i].hw_queue == NULL) {
+ DPAA2_QDMA_ERR("No H/W queue available for VQ");
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ qdma_vqs[i].status_ring = NULL;
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ qdma_vqs[i].in_use = 1;
+ qdma_vqs[i].lcore_id = lcore_id;
+
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return i;
+}
+
+static void
+dpaa2_qdma_populate_fle(struct qbman_fle *fle,
+ uint64_t src, uint64_t dest,
+ size_t len, uint32_t flags)
+{
+ struct qdma_sdd *sdd;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
+ (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
+
+ /* first frame list to source descriptor */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
+ DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+
+ /* source and destination descriptor */
+ DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
+ sdd++;
+ DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */
+
+ fle++;
+ /* source frame list to source buffer */
+ if (flags & RTE_QDMA_JOB_SRC_PHY) {
+ DPAA2_SET_FLE_ADDR(fle, src);
+ DPAA2_SET_FLE_BMT(fle);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ fle++;
+ /* destination frame list to destination buffer */
+ if (flags & RTE_QDMA_JOB_DEST_PHY) {
+ DPAA2_SET_FLE_BMT(fle);
+ DPAA2_SET_FLE_ADDR(fle, dest);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ /* Final bit: 1, for last frame list */
+ DPAA2_SET_FLE_FIN(fle);
+}
+
+static int
+dpdmai_dev_enqueue(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t txq_id,
+ uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ struct qdma_io_meta *io_meta;
+ struct qbman_fd fd;
+ struct dpaa2_queue *txq;
+ struct qbman_fle *fle;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ txq = &(dpdmai_dev->tx_queue[txq_id]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ /*
+ * Get an FLE/SDD from FLE pool.
+ * Note: IO metadata is before the FLE and SDD memory.
+ */
+ ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
+ if (ret) {
+ DPAA2_QDMA_DP_WARN("Memory alloc failed for FLE");
+ return ret;
+ }
+
+ /* Set the metadata */
+ io_meta->cnxt = (size_t)job;
+ io_meta->id = vq_id;
+
+ fle = (struct qbman_fle *)(io_meta + 1);
+
+ /* populate Frame descriptor */
+ memset(&fd, 0, sizeof(struct qbman_fd));
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(&fd);
+ DPAA2_SET_FD_FRC(&fd, QDMA_SER_CTX);
+
+ /* Populate FLE */
+ memset(fle, 0, QDMA_FLE_POOL_SIZE);
+ dpaa2_qdma_populate_fle(fle, job->src, job->dest, job->len, job->flags);
+
+ /* Enqueue the packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_QDMA_ERR("Transmit failure with err: %d", ret);
+ } while (ret == -EBUSY);
+
+ DPAA2_QDMA_DP_DEBUG("Successfully transmitted a packet");
+
+ return ret;
+}
+
+int __rte_experimental
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ int i, ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < nb_jobs; i++) {
+ ret = rte_qdma_vq_enqueue(vq_id, job[i]);
+ if (ret < 0)
+ break;
+ }
+
+ return i;
+}
+
+int __rte_experimental
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != qdma_vq->lcore_id) {
+ DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
+ vq_id);
+ return -EINVAL;
+ }
+
+ ret = dpdmai_dev_enqueue(dpdmai_dev, qdma_pq->queue_id, vq_id, job);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
+ return ret;
+ }
+
+ qdma_vq->num_enqueues++;
+
+ return 1;
+}
+
+/* Function to receive a QDMA job for a given device and queue */
+static int
+dpdmai_dev_dequeue(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t rxq_id,
+ uint16_t *vq_id,
+ struct rte_qdma_job **job)
+{
+ struct qdma_io_meta *io_meta;
+ struct dpaa2_queue *rxq;
+ struct qbman_result *dq_storage;
+ struct qbman_pull_desc pulldesc;
+ const struct qbman_fd *fd;
+ struct qbman_swp *swp;
+ struct qbman_fle *fle;
+ uint32_t fqid;
+ uint8_t status;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ rxq = &(dpdmai_dev->rx_queue[rxq_id]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+ fqid = rxq->fqid;
+
+ /* Prepare dequeue descriptor */
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
+ continue;
+ }
+ break;
+ }
+
+ /* Check if previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until dq_storage is updated with new token by QBMAN */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_QDMA_DP_DEBUG("No frame is delivered");
+ return 0;
+ }
+
+ /* Get the FD */
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ /*
+ * Fetch metadata from FLE. job and vq_id were set
+ * in metadata in the enqueue operation.
+ */
+ fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ io_meta = (struct qdma_io_meta *)(fle) - 1;
+ if (vq_id)
+ *vq_id = io_meta->id;
+
+ *job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
+ (*job)->status = DPAA2_GET_FD_ERR(fd);
+
+ /* Free FLE to the pool */
+ rte_mempool_put(qdma_dev.fle_pool, io_meta);
+
+ DPAA2_QDMA_DP_DEBUG("packet received");
+
+ return 1;
+}
+
+int __rte_experimental
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < nb_jobs; i++) {
+ job[i] = rte_qdma_vq_dequeue(vq_id);
+ if (!job[i])
+ break;
+ }
+
+ return i;
+}
+
+struct rte_qdma_job * __rte_experimental
+rte_qdma_vq_dequeue(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ struct rte_qdma_job *job = NULL;
+ struct qdma_virt_queue *temp_qdma_vq;
+ int dequeue_budget = QDMA_DEQUEUE_BUDGET;
+ int ring_count, ret, i;
+ uint16_t temp_vq_id;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
+ DPAA2_QDMA_ERR("QDMA dequeue for vqid %d on wrong core",
+ vq_id);
+ return NULL;
+ }
+
+ /* Only dequeue when there are pending jobs on VQ */
+ if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
+ return NULL;
+
+ if (qdma_vq->exclusive_hw_queue) {
+ /* In case of exclusive queue directly fetch from HW queue */
+ ret = dpdmai_dev_dequeue(dpdmai_dev, qdma_pq->queue_id,
+ NULL, &job);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR(
+ "Dequeue from DPDMAI device failed: %d", ret);
+ return NULL;
+ }
+ } else {
+ /*
+ * Get the QDMA completed jobs from the software ring.
+ * In case they are not available on the ring poke the HW
+ * to fetch completed jobs from corresponding HW queues
+ */
+ ring_count = rte_ring_count(qdma_vq->status_ring);
+ if (ring_count == 0) {
+ /* TODO - How to have right budget */
+ for (i = 0; i < dequeue_budget; i++) {
+ ret = dpdmai_dev_dequeue(dpdmai_dev,
+ qdma_pq->queue_id, &temp_vq_id, &job);
+ if (ret == 0)
+ break;
+ temp_qdma_vq = &qdma_vqs[temp_vq_id];
+ rte_ring_enqueue(temp_qdma_vq->status_ring,
+ (void *)(job));
+ ring_count = rte_ring_count(
+ qdma_vq->status_ring);
+ if (ring_count)
+ break;
+ }
+ }
+
+ /* Dequeue job from the software ring to provide to the user */
+ rte_ring_dequeue(qdma_vq->status_ring, (void **)&job);
+ if (job)
+ qdma_vq->num_dequeues++;
+ }
+
+ return job;
+}
+
+void __rte_experimental
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_status)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (qdma_vq->in_use) {
+ vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
+ vq_status->lcore_id = qdma_vq->lcore_id;
+ vq_status->num_enqueues = qdma_vq->num_enqueues;
+ vq_status->num_dequeues = qdma_vq->num_dequeues;
+ vq_status->num_pending_jobs = vq_status->num_enqueues -
+ vq_status->num_dequeues;
+ }
+}
+
+int __rte_experimental
+rte_qdma_vq_destroy(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
+ return -EBUSY;
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ if (qdma_vq->exclusive_hw_queue)
+ free_hw_queue(qdma_vq->hw_queue);
+ else {
+ if (qdma_vq->status_ring)
+ rte_ring_free(qdma_vq->status_ring);
+
+ put_hw_queue(qdma_vq->hw_queue);
+ }
+
+ memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
+
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return 0;
+}
+
+void __rte_experimental
+rte_qdma_stop(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 0;
+}
+
+void __rte_experimental
+rte_qdma_destroy(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_qdma_reset();
+}
+
+static const struct rte_rawdev_ops dpaa2_qdma_ops;
+
+static int
+add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
+ queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
+ if (!queue) {
+ DPAA2_QDMA_ERR(
+ "Memory allocation failed for QDMA queue");
+ return -ENOMEM;
+ }
+
+ queue->dpdmai_dev = dpdmai_dev;
+ queue->queue_id = i;
+
+ TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
+ qdma_dev.num_hw_queues++;
+ }
+
+ return 0;
+}
+
+static void
+remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue = NULL;
+ struct qdma_hw_queue *tqueue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
+ if (queue->dpdmai_dev == dpdmai_dev) {
+ TAILQ_REMOVE(&qdma_queue_list, queue, next);
+ rte_free(queue);
+ queue = NULL;
+ }
+ }
+}
+
+static int
+dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Remove HW queues from global list */
+ remove_hw_queues_from_list(dpdmai_dev);
+
+ ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("dpdmai disable failed");
+
+ /* Free the DQRR storage used by the Rx queues */
+ for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+
+ /* Close the device at underlying layer */
+ ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("Failure closing dpdmai device");
+
+ return 0;
+}
+
+static int
+dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpdmai_attr attr;
+ struct dpdmai_rx_queue_attr rx_attr;
+ struct dpdmai_tx_queue_attr tx_attr;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Open DPDMAI device */
+ dpdmai_dev->dpdmai_id = dpdmai_id;
+ dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
+ return ret;
+ }
+
+ /* Get DPDMAI attributes */
+ ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, &attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->num_queues = attr.num_of_priorities;
+
+ /* Set up Rx Queues */
+ for (i = 0; i < attr.num_of_priorities; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+ ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
+ CMD_PRI_LOW,
+ dpdmai_dev->token,
+ i, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
+ ret);
+ goto init_err;
+ }
+
+ /* Allocate DQ storage for the DPDMAI Rx queues */
+ rxq = &(dpdmai_dev->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_QDMA_ERR("q_storage allocation failed");
+ ret = -ENOMEM;
+ goto init_err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
+ goto init_err;
+ }
+ }
+
+ /* Get Rx and Tx queues FQID's */
+ for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, &rx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+ ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, &tx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+ }
+
+ /* Enable the device */
+ ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+ goto init_err;
+ }
+
+ /* Add the HW queue to the global list */
+ ret = add_hw_queues_to_list(dpdmai_dev);
+ if (ret) {
+ DPAA2_QDMA_ERR("Adding H/W queue to list failed");
+ goto init_err;
+ }
+ DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+
+ return 0;
+init_err:
+ dpaa2_dpdmai_dev_uninit(rawdev);
+ return ret;
+}
+
+static int
+rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
+ sizeof(struct dpaa2_dpdmai_dev),
+ rte_socket_id());
+ if (!rawdev) {
+ DPAA2_QDMA_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ dpaa2_dev->rawdev = rawdev;
+ rawdev->dev_ops = &dpaa2_qdma_ops;
+ rawdev->device = &dpaa2_dev->device;
+ rawdev->driver_name = dpaa2_drv->driver.name;
+
+ /* Invoke PMD device initialization function */
+ ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
+ if (ret) {
+ rte_rawdev_pmd_release(rawdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ dpaa2_dpdmai_dev_uninit(rawdev);
+
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ DPAA2_QDMA_ERR("Device cleanup failed");
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
+ .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
+ .drv_type = DPAA2_QDMA,
+ .probe = rte_dpaa2_qdma_probe,
+ .remove = rte_dpaa2_qdma_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
+
+RTE_INIT(dpaa2_qdma_init_log)
+{
+ dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
+ if (dpaa2_qdma_logtype >= 0)
+ rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
+}
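
Because both the enqueue and dequeue paths above refuse calls from an lcore other than the one a virtual queue was created on, a worker-per-lcore pattern is the natural way to drive this PMD. A minimal sketch, assuming the QDMA device has already been configured and started elsewhere and that job setup is omitted:

    #include <rte_lcore.h>
    #include <rte_pmd_dpaa2_qdma.h>

    static int
    qdma_worker(void *arg __rte_unused)
    {
            struct rte_qdma_job *done;
            int vq_id;

            /* Create the VQ on this lcore; enqueue/dequeue must later
             * be issued from the same lcore.
             */
            vq_id = rte_qdma_vq_create(rte_lcore_id(), 0);
            if (vq_id < 0)
                    return vq_id;

            /* ... enqueue jobs with rte_qdma_vq_enqueue(vq_id, job) ... */

            /* Poll for a completion on the same lcore */
            do {
                    done = rte_qdma_vq_dequeue(vq_id);
            } while (done == NULL);

            return rte_qdma_vq_destroy(vq_id);
    }

    /* Typically launched on every worker lcore, e.g.:
     * rte_eal_mp_remote_launch(qdma_worker, NULL, SKIP_MASTER);
     */
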
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
new file mode 100644
index 00000000..c6a05780
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_H__
+#define __DPAA2_QDMA_H__
+
+struct qdma_sdd;
+struct qdma_io_meta;
+
+#define DPAA2_QDMA_MAX_FLE 3
+#define DPAA2_QDMA_MAX_SDD 2
+
+/** FLE pool element size: IO metadata + 3 frame list entries + 2 source/destination descriptors */
+#define QDMA_FLE_POOL_SIZE (sizeof(struct qdma_io_meta) + \
+ sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
+ sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
+/** FLE pool cache size */
+#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
+
+/** Notification by FQD_CTX[fqid] */
+#define QDMA_SER_CTX (1 << 8)
+
+/**
+ * Source descriptor command read transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_SET_SDD_RD_COHERENT(sdd) ((sdd)->cmd = (0xb << 28))
+/**
+ * Destination descriptor command write transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_SET_SDD_WR_COHERENT(sdd) ((sdd)->cmd = (0x6 << 28))
+
+/** Maximum possible H/W Queues on each core */
+#define MAX_HW_QUEUE_PER_CORE 64
+
+/**
+ * In case of Virtual Queue mode, this specifies the number of
+ * dequeues the 'qdma_vq_dequeue/multi' API does from the H/W Queue
+ * in case there is no job present on the Virtual Queue ring.
+ */
+#define QDMA_DEQUEUE_BUDGET 64
+
+/**
+ * Represents a QDMA device.
+ * A single QDMA device exists, which is a combination of multiple DPDMAI rawdevs.
+ */
+struct qdma_device {
+ /** total number of hw queues. */
+ uint16_t num_hw_queues;
+ /**
+ * Maximum number of hw queues to be allocated per core.
+ * This is limited by MAX_HW_QUEUE_PER_CORE
+ */
+ uint16_t max_hw_queues_per_core;
+ /** Maximum number of VQ's */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /** Device state - started or stopped */
+ uint8_t state;
+ /** FLE pool for the device */
+ struct rte_mempool *fle_pool;
+ /** FLE pool element count */
+ int fle_pool_count;
+ /** Lock to serialize access to the QDMA device state */
+ rte_spinlock_t lock;
+};
+
+/** Represents a QDMA H/W queue */
+struct qdma_hw_queue {
+ /** Pointer to Next instance */
+ TAILQ_ENTRY(qdma_hw_queue) next;
+ /** DPDMAI device to communicate with HW */
+ struct dpaa2_dpdmai_dev *dpdmai_dev;
+ /** queue ID to communicate with HW */
+ uint16_t queue_id;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** Number of users of this hw queue */
+ uint32_t num_users;
+};
+
+/** Represents a QDMA virtual queue */
+struct qdma_virt_queue {
+ /** Status ring of the virtual queue */
+ struct rte_ring *status_ring;
+ /** Associated hw queue */
+ struct qdma_hw_queue *hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** States if this vq is in use or not */
+ uint8_t in_use;
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /* Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /* Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+};
+
+/** Represents a QDMA per core hw queues allocation in virtual mode */
+struct qdma_per_core_info {
+ /** list for allocated hw queues */
+ struct qdma_hw_queue *hw_queues[MAX_HW_QUEUE_PER_CORE];
+ /* Number of hw queues allocated for this core */
+ uint16_t num_hw_queues;
+};
+
+/** Metadata which is stored with each operation */
+struct qdma_io_meta {
+ /**
+ * Context which is stored in the FLE pool (just before the FLE).
+ * The QDMA job pointer is stored in this context as part of the metadata.
+ */
+ uint64_t cnxt;
+ /** VQ ID, stored as part of the metadata of the enqueue command */
+ uint64_t id;
+};
+
+/** Source/Destination Descriptor */
+struct qdma_sdd {
+ uint32_t rsv;
+ /** Stride configuration */
+ uint32_t stride;
+ /** Route-by-port command */
+ uint32_t rbpcmd;
+ uint32_t cmd;
+} __attribute__((__packed__));
+
+/** Represents a DPDMAI raw device */
+struct dpaa2_dpdmai_dev {
+ /** Pointer to Next device instance */
+ TAILQ_ENTRY(dpaa2_qdma_device) next;
+ /** handle to DPDMAI object */
+ struct fsl_mc_io dpdmai;
+ /** HW ID for DPDMAI object */
+ uint32_t dpdmai_id;
+ /** Token of this device */
+ uint16_t token;
+ /** Number of queues in this DPDMAI device */
+ uint8_t num_queues;
+ /** RX queues */
+ struct dpaa2_queue rx_queue[DPDMAI_PRIO_NUM];
+ /** TX queues */
+ struct dpaa2_queue tx_queue[DPDMAI_PRIO_NUM];
+};
+
+#endif /* __DPAA2_QDMA_H__ */
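
For reference, each object drawn from the FLE pool is carved by the enqueue path into three consecutive regions; the offsets follow directly from dpdmai_dev_enqueue() and dpaa2_qdma_populate_fle() in dpaa2_qdma.c, while the exact byte sizes depend on the qbman/qdma structure definitions:

    /*
     * Layout of one QDMA_FLE_POOL_SIZE object:
     *
     *   +---------------------+ <- rte_mempool_get() result (struct qdma_io_meta *)
     *   | qdma_io_meta        |    cnxt = job pointer, id = VQ ID
     *   +---------------------+ <- fle = (struct qbman_fle *)(io_meta + 1)
     *   | FLE[0]: SDD list    |    points at the SDD pair below
     *   | FLE[1]: source      |    src address + length
     *   | FLE[2]: destination |    dest address + length, FIN bit set
     *   +---------------------+ <- sdd = (struct qdma_sdd *)(fle + DPAA2_QDMA_MAX_FLE)
     *   | SDD[0]: read cmd    |    coherent read (RBP=0)
     *   | SDD[1]: write cmd   |    coherent write (RBP=0)
     *   +---------------------+
     *
     * The frame descriptor handed to QBMAN carries the IOVA of FLE[0], so the
     * dequeue path can step back one struct qdma_io_meta to recover the job.
     */
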
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
new file mode 100644
index 00000000..4779e4ce
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_LOGS_H__
+#define __DPAA2_QDMA_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_qdma_logtype;
+
+#define DPAA2_QDMA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_qdma_logtype, "dpaa2_qdma: " \
+ fmt "\n", ## args)
+
+#define DPAA2_QDMA_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_qdma_logtype, "dpaa2_qdma: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_QDMA_FUNC_TRACE() DPAA2_QDMA_DEBUG(">>")
+
+#define DPAA2_QDMA_INFO(fmt, args...) \
+ DPAA2_QDMA_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_ERR(fmt, args...) \
+ DPAA2_QDMA_LOG(ERR, fmt, ## args)
+#define DPAA2_QDMA_WARN(fmt, args...) \
+ DPAA2_QDMA_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_QDMA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_qdma: " fmt "\n", ## args)
+
+#define DPAA2_QDMA_DP_DEBUG(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_QDMA_DP_INFO(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_DP_WARN(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_QDMA_LOGS_H__ */
diff --git a/drivers/raw/dpaa2_qdma/meson.build b/drivers/raw/dpaa2_qdma/meson.build
new file mode 100644
index 00000000..b6a081f1
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
+deps += ['rawdev', 'mempool_dpaa2', 'ring']
+sources = files('dpaa2_qdma.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
new file mode 100644
index 00000000..17fffcb7
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_QDMA_H__
+#define __RTE_PMD_DPAA2_QDMA_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 QDMA specific structures.
+ *
+ */
+
+/** Determines the mode of operation */
+enum {
+ /**
+ * Allocate a H/W queue per VQ, i.e. an exclusive hardware queue for each VQ.
+ * This mode gives the best performance.
+ */
+ RTE_QDMA_MODE_HW,
+ /**
+ * A VQ does not have an exclusive associated H/W queue.
+ * Rather, a H/W queue is shared by multiple Virtual Queues.
+ * This mode uses intermediate data structures to support
+ * multi VQ to PQ mappings, and thus has some performance cost.
+ * Note: Even in this mode there is an option to allocate a H/W
+ * queue for a VQ. Please see 'RTE_QDMA_VQ_EXCLUSIVE_PQ' flag.
+ */
+ RTE_QDMA_MODE_VIRTUAL
+};
+
+/**
+ * If the user has configured Virtual Queue mode, but needs an exclusive
+ * H/W queue for a particular VQ (for better performance on that VQ),
+ * this flag can be passed while creating the Virtual Queue.
+ * A H/W queue will then be allocated exclusively for the VQ
+ * which uses this flag.
+ */
+#define RTE_QDMA_VQ_EXCLUSIVE_PQ (1ULL)
+
+/** States if the source address is physical. */
+#define RTE_QDMA_JOB_SRC_PHY (1ULL)
+
+/** States if the destination address is physical. */
+#define RTE_QDMA_JOB_DEST_PHY (1ULL << 1)
+
+/** Provides QDMA device attributes */
+struct rte_qdma_attr {
+ /** total number of hw QDMA queues present */
+ uint16_t num_hw_queues;
+};
+
+/** QDMA device configuration structure */
+struct rte_qdma_config {
+ /** Number of maximum hw queues to allocate per core. */
+ uint16_t max_hw_queues_per_core;
+ /** Maximum number of VQ's to be used. */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /**
+ * The user provides this as input to the driver as the size of the FLE pool.
+ * FLEs (and corresponding source/destination descriptors) are
+ * allocated by the driver at enqueue time to store src/dest and
+ * other data, and are freed at dequeue time. This determines the
+ * maximum number of in-flight jobs on the QDMA device. This should
+ * be a power of 2.
+ */
+ int fle_pool_count;
+};
+
+/** Provides QDMA device statistics */
+struct rte_qdma_vq_stats {
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /* Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /* Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+ /* total number of pending jobs in this VQ */
+ uint64_t num_pending_jobs;
+};
+
+/** Determines a QDMA job */
+struct rte_qdma_job {
+ /** Source Address from where DMA is (to be) performed */
+ uint64_t src;
+ /** Destination Address where DMA is (to be) done */
+ uint64_t dest;
+ /** Length of the DMA operation in bytes. */
+ uint32_t len;
+ /** See RTE_QDMA_JOB_ flags */
+ uint32_t flags;
+ /**
+ * User can specify a context which will be maintained
+ * on the dequeue operation.
+ */
+ uint64_t cnxt;
+ /**
+ * Status of the transaction.
+ * This is filled in the dequeue operation by the driver.
+ */
+ uint8_t status;
+};
+
+/**
+ * Initialize the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_init(void);
+
+/**
+ * Get the QDMA attributes.
+ *
+ * @param qdma_attr
+ * QDMA attributes providing total number of hw queues etc.
+ */
+void __rte_experimental
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr);
+
+/**
+ * Reset the QDMA device. This API will completely reset the QDMA
+ * device, bringing it to original state as if only rte_qdma_init() API
+ * has been called.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_reset(void);
+
+/**
+ * Configure the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_configure(struct rte_qdma_config *qdma_config);
+
+/**
+ * Start the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_start(void);
+
+/**
+ * Create a Virtual Queue on a particular lcore id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param lcore_id
+ * LCORE ID on which this particular queue would be associated with.
+ * @param flags
+ * RTE_QDMA_VQ_ flags. See macro definitions.
+ *
+ * @returns
+ * - >= 0: Virtual queue ID.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);
+
+/**
+ * Enqueue multiple jobs to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA jobs provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs containing relevant information related to DMA.
+ * @param nb_jobs
+ * Number of QDMA jobs provided by the user.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Enqueue a single job to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA job provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * A QDMA Job containing relevant information related to DMA.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job);
+
+/**
+ * Dequeue multiple completed jobs from a Virtual Queue.
+ * Provides the list of completed jobs capped by nb_jobs.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs returned from the API.
+ * @param nb_jobs
+ * Number of QDMA jobs requested for dequeue by the user.
+ *
+ * @returns
+ * Number of jobs actually dequeued.
+ */
+int __rte_experimental
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Dequeue a single completed job from a Virtual Queue.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ *
+ * @returns
+ * - A completed job or NULL if no job is there.
+ */
+struct rte_qdma_job * __rte_experimental
+rte_qdma_vq_dequeue(uint16_t vq_id);
+
+/**
+ * Get a Virtual Queue statistics.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param vq_stats
+ * VQ statistics structure which will be filled in by the driver.
+ */
+void __rte_experimental
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_stats);
+
+/**
+ * Destroy the Virtual Queue specified by vq_id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param vq_id
+ * Virtual Queue ID which needs to be deinitialized.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_destroy(uint16_t vq_id);
+
+/**
+ * Stop QDMA device.
+ */
+void __rte_experimental
+rte_qdma_stop(void);
+
+/**
+ * Destroy the QDMA device.
+ */
+void __rte_experimental
+rte_qdma_destroy(void);
+
+#endif /* __RTE_PMD_DPAA2_QDMA_H__*/
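
Putting the calls declared above together, a host application would typically drive the QDMA device as sketched below; the configuration values are illustrative only and error handling is trimmed for brevity:

    #include <rte_lcore.h>
    #include <rte_pmd_dpaa2_qdma.h>

    static int
    qdma_copy_example(uint64_t src_phys, uint64_t dst_phys, uint32_t len)
    {
            struct rte_qdma_config cfg = {
                    .max_hw_queues_per_core = 1,     /* illustrative */
                    .max_vqs = 8,
                    .mode = RTE_QDMA_MODE_VIRTUAL,
                    .fle_pool_count = 2048,          /* power of 2: max in-flight jobs */
            };
            struct rte_qdma_job job = {
                    .src = src_phys,
                    .dest = dst_phys,
                    .len = len,
                    .flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
            };
            struct rte_qdma_job *done;
            int vq_id;

            rte_qdma_init();
            rte_qdma_configure(&cfg);
            rte_qdma_start();

            /* The VQ is bound to this lcore; enqueue/dequeue must run here */
            vq_id = rte_qdma_vq_create(rte_lcore_id(), 0);
            if (vq_id < 0)
                    return vq_id;

            rte_qdma_vq_enqueue(vq_id, &job);

            /* Busy-poll until the copy completes */
            while ((done = rte_qdma_vq_dequeue(vq_id)) == NULL)
                    ;

            rte_qdma_vq_destroy(vq_id);
            rte_qdma_stop();
            rte_qdma_destroy();

            return done->status;
    }
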
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
new file mode 100644
index 00000000..fe42a227
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+ global:
+
+ rte_qdma_attr_get;
+ rte_qdma_configure;
+ rte_qdma_destroy;
+ rte_qdma_init;
+ rte_qdma_reset;
+ rte_qdma_start;
+ rte_qdma_stop;
+ rte_qdma_vq_create;
+ rte_qdma_vq_destroy;
+ rte_qdma_vq_dequeue;
+ rte_qdma_vq_dequeue_multi;
+ rte_qdma_vq_enqueue;
+ rte_qdma_vq_enqueue_multi;
+ rte_qdma_vq_stats;
+
+ local: *;
+};
diff --git a/drivers/raw/ifpga_rawdev/Makefile b/drivers/raw/ifpga_rawdev/Makefile
new file mode 100644
index 00000000..f3b9d5e6
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ifpga_rawdev.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/ifpga
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_ifpga
+
+EXPORT_MAP := rte_pmd_ifpga_rawdev_version.map
+
+LIBABIVER := 1
+
+VPATH += $(SRCDIR)/base
+
+include $(RTE_SDK)/drivers/raw/ifpga_rawdev/base/Makefile
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/ifpga_rawdev/base/Makefile b/drivers/raw/ifpga_rawdev/base/Makefile
new file mode 100644
index 00000000..d79da72b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/Makefile
@@ -0,0 +1,26 @@
+#SPDX-License-Identifier: BSD-3-Clause
+#Copyright(c) 2010-2018 Intel Corporation
+
+ifneq ($(CONFIG_RTE_LIBRTE_EAL),)
+OSDEP := osdep_rte
+else
+OSDEP := osdep_raw
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev/base/$(OSDEP)
+
+SRCS-y += ifpga_api.c
+SRCS-y += ifpga_enumerate.c
+SRCS-y += ifpga_feature_dev.c
+SRCS-y += ifpga_fme.c
+SRCS-y += ifpga_fme_iperf.c
+SRCS-y += ifpga_fme_dperf.c
+SRCS-y += ifpga_fme_error.c
+SRCS-y += ifpga_port.c
+SRCS-y += ifpga_port_error.c
+SRCS-y += opae_hw_api.c
+SRCS-y += opae_ifpga_hw_api.c
+SRCS-y += opae_debug.c
+SRCS-y += ifpga_fme_pr.c
+
+SRCS-y += $(wildcard $(SRCDIR)/base/$(OSDEP)/*.c)
diff --git a/drivers/raw/ifpga_rawdev/base/README b/drivers/raw/ifpga_rawdev/base/README
new file mode 100644
index 00000000..5bc2ed0f
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/README
@@ -0,0 +1,31 @@
+..
+
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+Intel iFPGA driver
+==================
+
+This directory contains the source code of the Intel FPGA driver released by
+the team which develops the Intel FPGA Open Programmable Acceleration Engine
+(OPAE). The base/ directory contains the original source package. The base code
+currently supports Intel FPGA solutions, including the integrated solution
+(Intel(R) Xeon(R) CPU with FPGAs) and the discrete solution (Intel(R)
+Programmable Acceleration Card with Intel(R) Arria(R) 10 FPGA), and it can be
+extended to support more FPGA devices in the future.
+
+Please refer to [1][2] for more information on OPAE and Intel FPGAs.
+
+[1] https://01.org/OPAE
+[2] https://www.altera.com/solutions/acceleration-hub/overview.html
+
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ osdep_raw/osdep_generic.h
+ osdep_rte/osdep_generic.h
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.c b/drivers/raw/ifpga_rawdev/base/ifpga_api.c
new file mode 100644
index 00000000..540e171a
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_api.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+#include "opae_hw_api.h"
+
+/* Accelerator APIs */
+static int ifpga_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return fpga_get_afu_uuid(port, uuid);
+}
+
+static int ifpga_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+ struct fpga_uafu_irq_set irq_set;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ if (start >= afu_info->num_irqs || start + count > afu_info->num_irqs)
+ return -EINVAL;
+
+ port = br->data;
+
+ irq_set.start = start;
+ irq_set.count = count;
+ irq_set.evtfds = evtfds;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_UINT, &irq_set);
+}
+
+static int ifpga_acc_get_info(struct opae_accelerator *acc,
+ struct opae_acc_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -ENODEV;
+
+ info->num_regions = afu_info->num_regions;
+ info->num_irqs = afu_info->num_irqs;
+
+ return 0;
+}
+
+static int ifpga_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (info->index >= afu_info->num_regions)
+ return -EINVAL;
+
+	/* only one RW region is supported for an AFU for now */
+ info->flags = ACC_REGION_READ | ACC_REGION_WRITE | ACC_REGION_MMIO;
+ info->len = afu_info->region[info->index].len;
+ info->addr = afu_info->region[info->index].addr;
+
+ return 0;
+}
+
+static int ifpga_acc_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ switch (byte) {
+ case 8:
+ *(u64 *)data = opae_readq(region->addr + offset);
+ break;
+ case 4:
+ *(u32 *)data = opae_readl(region->addr + offset);
+ break;
+ case 2:
+ *(u16 *)data = opae_readw(region->addr + offset);
+ break;
+ case 1:
+ *(u8 *)data = opae_readb(region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ifpga_acc_write(struct opae_accelerator *acc,
+ unsigned int region_idx, u64 offset,
+ unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ /* normal mmio case */
+ switch (byte) {
+ case 8:
+ opae_writeq(*(u64 *)data, region->addr + offset);
+ break;
+ case 4:
+ opae_writel(*(u32 *)data, region->addr + offset);
+ break;
+ case 2:
+ opae_writew(*(u16 *)data, region->addr + offset);
+ break;
+ case 1:
+ opae_writeb(*(u8 *)data, region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct opae_accelerator_ops ifpga_acc_ops = {
+ .read = ifpga_acc_read,
+ .write = ifpga_acc_write,
+ .set_irq = ifpga_acc_set_irq,
+ .get_info = ifpga_acc_get_info,
+ .get_region_info = ifpga_acc_get_region_info,
+ .get_uuid = ifpga_acc_get_uuid,
+};
+
+/* Bridge APIs */
+
+static int ifpga_br_reset(struct opae_bridge *br)
+{
+ struct ifpga_port_hw *port = br->data;
+
+ return fpga_port_reset(port);
+}
+
+struct opae_bridge_ops ifpga_br_ops = {
+ .reset = ifpga_br_reset,
+};
+
+/* Manager APIs */
+static int ifpga_mgr_flash(struct opae_manager *mgr, int id, void *buf,
+ u32 size, u64 *status)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+ struct ifpga_hw *hw = fme->parent;
+
+ return ifpga_pr(hw, id, buf, size, status);
+}
+
+struct opae_manager_ops ifpga_mgr_ops = {
+ .flash = ifpga_mgr_flash,
+};
+
+/* Adapter APIs */
+static int ifpga_adapter_enumerate(struct opae_adapter *adapter)
+{
+ struct ifpga_hw *hw = malloc(sizeof(*hw));
+
+ if (hw) {
+ memset(hw, 0, sizeof(*hw));
+ hw->pci_data = adapter->data;
+ hw->adapter = adapter;
+ if (ifpga_bus_enumerate(hw))
+ goto error;
+ return ifpga_bus_init(hw);
+ }
+
+error:
+ return -ENOMEM;
+}
+
+struct opae_adapter_ops ifpga_adapter_ops = {
+ .enumerate = ifpga_adapter_enumerate,
+};
+
+/**
+ * ifpga_pr - do the partial reconfiguration for a given port device
+ * @hw: pointer to the HW structure
+ * @port_id: the port device id
+ * @buffer: the buffer of the bitstream
+ * @size: the size of the bitstream
+ * @status: hardware status, including the PR error code when -EIO is returned.
+ *
+ * @return
+ * - 0: Success, partial reconfiguration finished.
+ * - <0: Error code returned in partial reconfiguration.
+ **/
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+
+ return do_pr(hw, port_id, buffer, size, status);
+}
+
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_get_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_get_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set)
+{
+ if (!hw || !irq_set)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_irq(&hw->fme, feature_id, irq_set);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_irq(&hw->port[port_id], feature_id, irq_set);
+ }
+
+ return -ENOENT;
+}
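
The doc comment on ifpga_pr() above describes the partial reconfiguration entry
point exported by this file. The sketch below shows a hypothetical caller; it is
not part of the patch, and 'hw', 'buf' and 'len' are assumed to come from the
enumeration and rawdev layers.

    /* Illustrative only, not part of the patch: program one port via
     * ifpga_pr(). Per the comment above, '*status' carries the hardware PR
     * error code only when -EIO is returned; decoding it is left to the
     * driver internals.
     */
    static int program_port(struct ifpga_hw *hw, u32 port_id, void *buf, u32 len)
    {
    	u64 status = 0;
    	int ret;

    	ret = ifpga_pr(hw, port_id, buf, len, &status);
    	if (ret == -EIO) {
    		/* inspect 'status' here if a diagnostic is needed */
    	}

    	return ret;
    }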
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.h b/drivers/raw/ifpga_rawdev/base/ifpga_api.h
new file mode 100644
index 00000000..dae7ca14
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_API_H_
+#define _IFPGA_API_H_
+
+#include "opae_hw_api.h"
+#include "ifpga_hw.h"
+
+extern struct opae_adapter_ops ifpga_adapter_ops;
+extern struct opae_manager_ops ifpga_mgr_ops;
+extern struct opae_bridge_ops ifpga_br_ops;
+extern struct opae_accelerator_ops ifpga_acc_ops;
+
+/* common APIs */
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set);
+
+/* FME APIs */
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status);
+
+#endif /* _IFPGA_API_H_ */
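
As a rough illustration of the common IRQ entry point declared above, the
sketch below binds a set of eventfds to the port user interrupts. It is not
part of the patch; the fpga_uafu_irq_set layout (start/count/evtfds) is
inferred from its use in ifpga_acc_set_irq() in ifpga_api.c, and
IFPGA_PORT_FEATURE_ID_UINT comes from the feature-device headers.

    /* Illustrative only, not part of the patch: route port user interrupts
     * to caller-provided eventfds through ifpga_set_irq().
     */
    static int bind_port_uint_irqs(struct ifpga_hw *hw, u32 port_id,
    			       s32 *evtfds, u32 count)
    {
    	struct fpga_uafu_irq_set irq_set;

    	irq_set.start = 0;		/* first user interrupt vector */
    	irq_set.count = count;		/* number of eventfds supplied */
    	irq_set.evtfds = evtfds;

    	return ifpga_set_irq(hw, FEATURE_FIU_ID_PORT, port_id,
    			     IFPGA_PORT_FEATURE_ID_UINT, &irq_set);
    }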
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_compat.h b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
new file mode 100644
index 00000000..931c8938
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMPAT_H_
+#define _IFPGA_COMPAT_H_
+
+#include "opae_osdep.h"
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define IFPGA_PAGE_SHIFT 12
+#define IFPGA_PAGE_SIZE (1 << IFPGA_PAGE_SHIFT)
+#define IFPGA_PAGE_MASK (~(IFPGA_PAGE_SIZE - 1))
+#define IFPGA_PAGE_ALIGN(addr) (((addr) + IFPGA_PAGE_SIZE - 1)\
+ & IFPGA_PAGE_MASK)
+#define IFPGA_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), IFPGA_PAGE_SIZE)
+
+#define readl(addr) opae_readl(addr)
+#define readq(addr) opae_readq(addr)
+#define writel(value, addr) opae_writel(value, addr)
+#define writeq(value, addr) opae_writeq(value, addr)
+
+#define malloc(size) opae_malloc(size)
+#define zmalloc(size) opae_zmalloc(size)
+#define free(size) opae_free(size)
+
+/*
+ * Wait for a register's _field to change to the given value (_expect's _field)
+ * by polling with the given interval and timeout.
+ */
+#define fpga_wait_register_field(_field, _expect, _reg_addr, _timeout, _invl)\
+({ \
+ int wait = 0; \
+ int ret = -ETIMEDOUT; \
+ typeof(_expect) value; \
+ for (; wait <= _timeout; wait += _invl) { \
+ value.csr = readq(_reg_addr); \
+ if (_expect._field == value._field) { \
+ ret = 0; \
+ break; \
+ } \
+ udelay(_invl); \
+ } \
+ ret; \
+})
+
+#define __maybe_unused __attribute__((__unused__))
+
+#define UNUSED(x) (void)(x)
+
+#endif /* _IFPGA_COMPAT_H_ */
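
The fpga_wait_register_field() macro above is the basic polling primitive used
throughout the base code. A minimal sketch of its intended use follows; it is
not part of the patch, and it borrows struct feature_port_header,
struct feature_port_control and the RST_POLL_* constants defined later in
ifpga_defines.h.

    /* Illustrative only, not part of the patch: poll the soft-reset
     * acknowledge bit of a mapped port header until hardware sets it or the
     * poll times out.
     */
    static int wait_port_reset_ack(struct feature_port_header *port_hdr)
    {
    	struct feature_port_control expect;

    	expect.csr = 0;
    	expect.port_sftrst_ack = 1;	/* expected value of the polled field */

    	return fpga_wait_register_field(port_sftrst_ack, expect,
    					&port_hdr->control,
    					RST_POLL_TIMEOUT, RST_POLL_INVL);
    }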
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_defines.h b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
new file mode 100644
index 00000000..aa025272
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
@@ -0,0 +1,1663 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_DEFINES_H_
+#define _IFPGA_DEFINES_H_
+
+#include "ifpga_compat.h"
+
+#define MAX_FPGA_PORT_NUM 4
+
+#define FME_FEATURE_HEADER "fme_hdr"
+#define FME_FEATURE_THERMAL_MGMT "fme_thermal"
+#define FME_FEATURE_POWER_MGMT "fme_power"
+#define FME_FEATURE_GLOBAL_IPERF "fme_iperf"
+#define FME_FEATURE_GLOBAL_ERR "fme_error"
+#define FME_FEATURE_PR_MGMT "fme_pr"
+#define FME_FEATURE_HSSI_ETH "fme_hssi"
+#define FME_FEATURE_GLOBAL_DPERF "fme_dperf"
+#define FME_FEATURE_QSPI_FLASH "fme_qspi_flash"
+
+#define PORT_FEATURE_HEADER "port_hdr"
+#define PORT_FEATURE_UAFU "port_uafu"
+#define PORT_FEATURE_ERR "port_err"
+#define PORT_FEATURE_UMSG "port_umsg"
+#define PORT_FEATURE_PR "port_pr"
+#define PORT_FEATURE_UINT "port_uint"
+#define PORT_FEATURE_STP "port_stp"
+
+/*
+ * do not check the revision id, as it may be dynamic in
+ * some cases, e.g. UAFU.
+ */
+#define SKIP_REVISION_CHECK 0xff
+
+#define FME_HEADER_REVISION 1
+#define FME_THERMAL_MGMT_REVISION 0
+#define FME_POWER_MGMT_REVISION 1
+#define FME_GLOBAL_IPERF_REVISION 1
+#define FME_GLOBAL_ERR_REVISION 1
+#define FME_PR_MGMT_REVISION 2
+#define FME_HSSI_ETH_REVISION 0
+#define FME_GLOBAL_DPERF_REVISION 0
+#define FME_QSPI_REVISION 0
+
+#define PORT_HEADER_REVISION 0
+/* UAFU's header info depends on the downloaded GBS */
+#define PORT_UAFU_REVISION SKIP_REVISION_CHECK
+#define PORT_ERR_REVISION 1
+#define PORT_UMSG_REVISION 0
+#define PORT_UINT_REVISION 0
+#define PORT_STP_REVISION 1
+
+#define FEATURE_TYPE_AFU 0x1
+#define FEATURE_TYPE_BBB 0x2
+#define FEATURE_TYPE_PRIVATE 0x3
+#define FEATURE_TYPE_FIU 0x4
+
+#define FEATURE_FIU_ID_FME 0x0
+#define FEATURE_FIU_ID_PORT 0x1
+
+#define FEATURE_ID_HEADER 0x0
+#define FEATURE_ID_AFU 0xff
+
+enum fpga_id_type {
+ FME_ID,
+ PORT_ID,
+ FPGA_ID_MAX,
+};
+
+enum fme_feature_id {
+ FME_FEATURE_ID_HEADER = 0x0,
+
+ FME_FEATURE_ID_THERMAL_MGMT = 0x1,
+ FME_FEATURE_ID_POWER_MGMT = 0x2,
+ FME_FEATURE_ID_GLOBAL_IPERF = 0x3,
+ FME_FEATURE_ID_GLOBAL_ERR = 0x4,
+ FME_FEATURE_ID_PR_MGMT = 0x5,
+ FME_FEATURE_ID_HSSI_ETH = 0x6,
+ FME_FEATURE_ID_GLOBAL_DPERF = 0x7,
+ FME_FEATURE_ID_QSPI_FLASH = 0x8,
+
+ /* one for fme header. */
+ FME_FEATURE_ID_MAX = 0x9,
+};
+
+enum port_feature_id {
+ PORT_FEATURE_ID_HEADER = 0x0,
+ PORT_FEATURE_ID_ERROR = 0x1,
+ PORT_FEATURE_ID_UMSG = 0x2,
+ PORT_FEATURE_ID_UINT = 0x3,
+ PORT_FEATURE_ID_STP = 0x4,
+ PORT_FEATURE_ID_UAFU = 0x5,
+ PORT_FEATURE_ID_MAX = 0x6,
+};
+
+/*
+ * All headers and structures must be byte-packed to match the spec.
+ */
+#pragma pack(push, 1)
+
+struct feature_header {
+ union {
+ u64 csr;
+ struct {
+ u16 id:12;
+ u8 revision:4;
+ u32 next_header_offset:24;
+ u8 end_of_list:1;
+ u32 reserved:19;
+ u8 type:4;
+ };
+ };
+};
+
+struct feature_bbb_header {
+ struct uuid guid;
+};
+
+struct feature_afu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fiu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fme_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_verid; /* Fabric version ID */
+ u8 socket_id:1; /* Socket id */
+ u8 rsvd1:3; /* Reserved */
+			/* pci0 link available yes/no */
+			u8 pci0_link_avile:1;
+			/* pci1 link available yes/no */
+			u8 pci1_link_avile:1;
+			/* Coherent (QPI/UPI) link available yes/no */
+			u8 qpi_link_avile:1;
+ u8 rsvd2:1; /* Reserved */
+ /* IOMMU or VT-d supported yes/no */
+ u8 iommu_support:1;
+ u8 num_ports:3; /* Number of ports */
+ u8 sf_fab_ctl:1; /* Internal validation bit */
+ u8 rsvd3:3; /* Reserved */
+ /*
+ * Address width supported in bits
+ * BXT -0x26 , SKX -0x30
+ */
+ u8 address_width_bits:6;
+ u8 rsvd4:2; /* Reserved */
+ /* Size of cache supported in kb */
+ u16 cache_size:12;
+ u8 cache_assoc:4; /* Cache Associativity */
+ u16 rsvd5:15; /* Reserved */
+ u8 lock_bit:1; /* Lock bit */
+ };
+ };
+};
+
+#define FME_AFU_ACCESS_PF 0
+#define FME_AFU_ACCESS_VF 1
+
+struct feature_fme_port {
+ union {
+ u64 csr;
+ struct {
+ u32 port_offset:24;
+ u8 reserved1;
+ u8 port_bar:3;
+ u32 reserved2:20;
+ u8 afu_access_control:1;
+ u8 reserved3:4;
+ u8 port_implemented:1;
+ u8 reserved4:3;
+ };
+ };
+};
+
+struct feature_fme_fab_status {
+ union {
+ u64 csr;
+ struct {
+ u8 upilink_status:4; /* UPI Link Status */
+ u8 rsvd1:4; /* Reserved */
+ u8 pci0link_status:1; /* pci0 link status */
+ u8 rsvd2:3; /* Reserved */
+ u8 pci1link_status:1; /* pci1 link status */
+ u64 rsvd3:51; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_base {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Base Address of memory range */
+ u8 protected_base_addrss:4;
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_limit {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Limit Address of memory range */
+ u8 protected_limit_addrss:4;
+ u16 rsvd2:11; /* Reserved */
+ u8 enable:1; /* Enable GENPROTRANGE check */
+ u32 rsvd3; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_dxe_lock {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Determines write access to the DXE region CSRs
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_early_lock:1;
+ /*
+ * Determines write access to the HSSI CSR
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_late_lock:1;
+ u64 rsvd:62;
+ };
+ };
+};
+
+#define HSSI_ID_NO_HASSI 0
+#define HSSI_ID_PCIE_RP 1
+#define HSSI_ID_ETHERNET 2
+
+struct feature_fme_bitstream_id {
+ union {
+ u64 csr;
+ struct {
+ u32 gitrepo_hash:32; /* GIT repository hash */
+ /*
+ * HSSI configuration identifier:
+ * 0 - No HSSI
+ * 1 - PCIe-RP
+ * 2 - Ethernet
+ */
+ u8 hssi_id:4;
+ u16 rsvd1:12; /* Reserved */
+ /* Bitstream version patch number */
+ u8 bs_verpatch:4;
+ /* Bitstream version minor number */
+ u8 bs_verminor:4;
+ /* Bitstream version major number */
+ u8 bs_vermajor:4;
+ /* Bitstream version debug number */
+ u8 bs_verdebug:4;
+ };
+ };
+};
+
+struct feature_fme_bitstream_md {
+ union {
+ u64 csr;
+ struct {
+			/* Seed number used for the synthesis flow */
+ u8 synth_seed:4;
+ /* Synthesis date(day number - 2 digits) */
+ u8 synth_day:8;
+ /* Synthesis date(month number - 2 digits) */
+ u8 synth_month:8;
+ /* Synthesis date(year number - 2 digits) */
+ u8 synth_year:8;
+ u64 rsvd:36; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_iommu_ctrl {
+ union {
+ u64 csr;
+ struct {
+ /* Disables IOMMU prefetcher for C0 channel */
+ u8 prefetch_disableC0:1;
+ /* Disables IOMMU prefetcher for C1 channel */
+ u8 prefetch_disableC1:1;
+ /* Disables IOMMU partial cache line writes */
+ u8 prefetch_wrdisable:1;
+ u8 rsvd1:1; /* Reserved */
+ /*
+ * Select counter and read value from register
+ * iommu_stat.dbg_counters
+ * 0 - Number of 4K page translation response
+ * 1 - Number of 2M page translation response
+ * 2 - Number of 1G page translation response
+ */
+ u8 counter_sel:2;
+ u32 rsvd2:26; /* Reserved */
+ /* Connected to IOMMU SIP Capabilities */
+ u32 capecap_defeature;
+ };
+ };
+};
+
+struct feature_fme_iommu_stat {
+ union {
+ u64 csr;
+ struct {
+ /* Translation Enable bit from IOMMU SIP */
+ u8 translation_enable:1;
+ /* Drain request in progress */
+ u8 drain_req_inprog:1;
+ /* Invalidation current state */
+ u8 inv_state:3;
+ /* C0 Response Buffer current state */
+ u8 respbuffer_stateC0:3;
+ /* C1 Response Buffer current state */
+ u8 respbuffer_stateC1:3;
+ /* Last request ID to IOMMU SIP */
+ u8 last_reqID:4;
+ /* Last IOMMU SIP response ID value */
+ u8 last_respID:4;
+ /* Last IOMMU SIP response status value */
+ u8 last_respstatus:3;
+ /* C0 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC0:1;
+ /* C1 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC1:1;
+ /* C0 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC0:1;
+ /* C1 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC1:1;
+ /* C0 Response FIFO is not empty */
+ u8 respFIFO_notemptyC0:1;
+ /* C1 Response FIFO is not empty */
+ u8 respFIFO_notemptyC1:1;
+ /* C0 Response FIFO overflow detected */
+ u8 respFIFO_overflowC0:1;
+ /* C1 Response FIFO overflow detected */
+ u8 respFIFO_overflowC1:1;
+ /* C0 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC0:1;
+ /* C1 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC1:1;
+ /* Request FIFO overflow detected */
+ u8 reqFIFO_overflow:1;
+ /* IOMMU memory read in progress */
+ u8 memrd_inprog:1;
+ /* IOMMU memory write in progress */
+ u8 memwr_inprog:1;
+ u8 rsvd1:1; /* Reserved */
+ /* Value of counter selected by iommu_ctl.counter_sel */
+ u16 dbg_counters:16;
+ u16 rsvd2:12; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_pcie0_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_bar_lock:1; /* Lock VT-D BAR register */
+ u64 rsvd1:3;
+ u64 rciep:1; /* Configure PCIE0 as RCiEP */
+ u64 rsvd2:59;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR2 memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR rule is valid or not */
+ /*
+ * SMRR memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR2 memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR2 rule is valid or not */
+ /*
+ * SMRR2 memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_base {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of base address memory range */
+ u64 me_base:27;
+ u64 rsvd:37;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_limit {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of limit address memory range */
+ u64 me_limit:27;
+ u64 rsvd1:4;
+ u64 enable:1; /* Enable LLPR MESEG rule */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 reserved;
+ u64 scratchpad;
+ struct feature_fme_capability capability;
+ struct feature_fme_port port[MAX_FPGA_PORT_NUM];
+ struct feature_fme_fab_status fab_status;
+ struct feature_fme_bitstream_id bitstream_id;
+ struct feature_fme_bitstream_md bitstream_md;
+ struct feature_fme_genprotrange2_base genprotrange2_base;
+ struct feature_fme_genprotrange2_limit genprotrange2_limit;
+ struct feature_fme_dxe_lock dxe_lock;
+ struct feature_fme_iommu_ctrl iommu_ctrl;
+ struct feature_fme_iommu_stat iommu_stat;
+ struct feature_fme_pcie0_ctrl pcie0_control;
+ struct feature_fme_llpr_smrr_base smrr_base;
+ struct feature_fme_llpr_smrr_mask smrr_mask;
+ struct feature_fme_llpr_smrr2_base smrr2_base;
+ struct feature_fme_llpr_smrr2_mask smrr2_mask;
+ struct feature_fme_llpr_meseg_base meseg_base;
+ struct feature_fme_llpr_meseg_limit meseg_limit;
+};
+
+struct feature_port_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 port_number:2; /* Port Number 0-3 */
+ u8 rsvd1:6; /* Reserved */
+ u16 mmio_size; /* User MMIO size in KB */
+ u8 rsvd2; /* Reserved */
+ u8 sp_intr_num:4; /* Supported interrupts num */
+ u32 rsvd3:28; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_control {
+ union {
+ u64 csr;
+ struct {
+ u8 port_sftrst:1; /* Port Soft Reset */
+ u8 rsvd1:1; /* Reserved */
+ u8 latency_tolerance:1;/* '1' >= 40us, '0' < 40us */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_sftrst_ack:1; /* HW ACK for Soft Reset */
+ u64 rsvd3:59; /* Reserved */
+ };
+ };
+};
+
+#define PORT_POWER_STATE_NORMAL 0
+#define PORT_POWER_STATE_AP1 1
+#define PORT_POWER_STATE_AP2 2
+#define PORT_POWER_STATE_AP6 6
+
+struct feature_port_status {
+ union {
+ u64 csr;
+ struct {
+			u8 port_freeze:1; /* '1' - frozen, '0' - normal */
+ u8 rsvd1:7; /* Reserved */
+ u8 power_state:4; /* Power State */
+ u8 ap1_event:1; /* AP1 event was detected */
+ u8 ap2_event:1; /* AP2 event was detected */
+ u64 rsvd2:50; /* Reserved */
+ };
+ };
+};
+
+/* Port Header Register Set */
+struct feature_port_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 port_mailbox;
+ u64 scratchpad;
+ struct feature_port_capability capability;
+ struct feature_port_control control;
+ struct feature_port_status status;
+ u64 rsvd2;
+ u64 user_clk_freq_cmd0;
+ u64 user_clk_freq_cmd1;
+ u64 user_clk_freq_sts0;
+ u64 user_clk_freq_sts1;
+};
+
+struct feature_fme_tmp_threshold {
+ union {
+ u64 csr;
+ struct {
+ u8 tmp_thshold1:7; /* temperature Threshold 1 */
+ /* temperature Threshold 1 enable/disable */
+ u8 tmp_thshold1_enable:1;
+ u8 tmp_thshold2:7; /* temperature Threshold 2 */
+			/* temperature Threshold 2 enable/disable */
+ u8 tmp_thshold2_enable:1;
+ u8 pro_hot_setpoint:7; /* Proc Hot set point */
+ u8 rsvd4:1; /* Reserved */
+			u8 therm_trip_thshold:7; /* Thermal Trip Threshold */
+ u8 rsvd3:1; /* Reserved */
+ u8 thshold1_status:1; /* Threshold 1 Status */
+ u8 thshold2_status:1; /* Threshold 2 Status */
+ u8 rsvd5:1; /* Reserved */
+			/* Thermal Trip Threshold status */
+ u8 therm_trip_thshold_status:1;
+ u8 rsvd6:4; /* Reserved */
+ /* Validation mode- Force Proc Hot */
+ u8 valmodeforce:1;
+ /* Validation mode - Therm trip Hot */
+ u8 valmodetherm:1;
+ u8 rsvd2:2; /* Reserved */
+ u8 thshold_policy:1; /* threshold policy */
+ u32 rsvd:19; /* Reserved */
+ };
+ };
+};
+
+/* Temperature Sensor Read values format 1 */
+struct feature_fme_temp_rdsensor_fmt1 {
+ union {
+ u64 csr;
+ struct {
+ /* Reads out FPGA temperature in celsius */
+ u8 fpga_temp:7;
+ u8 rsvd0:1; /* Reserved */
+ /* Temperature reading sequence number */
+ u16 tmp_reading_seq_num;
+ /* Temperature reading is valid */
+ u8 tmp_reading_valid:1;
+ u8 rsvd1:7; /* Reserved */
+ u16 dbg_mode:10; /* Debug mode */
+ u32 rsvd2:22; /* Reserved */
+ };
+ };
+};
+
+/* Temperature sensor read values format 2 */
+struct feature_fme_temp_rdsensor_fmt2 {
+ u64 rsvd; /* Reserved */
+};
+
+/* Temperature Threshold Capability Register */
+struct feature_fme_tmp_threshold_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Temperature Threshold Unsupported */
+ u8 tmp_thshold_disabled:1;
+ u64 rsvd:63; /* Reserved */
+ };
+ };
+};
+
+/* FME THERMAL FEATURE */
+struct feature_fme_thermal {
+ struct feature_header header;
+ struct feature_fme_tmp_threshold threshold;
+ struct feature_fme_temp_rdsensor_fmt1 rdsensor_fm1;
+ struct feature_fme_temp_rdsensor_fmt2 rdsensor_fm2;
+ struct feature_fme_tmp_threshold_cap threshold_cap;
+};
+
+/* Power Status register */
+struct feature_fme_pm_status {
+ union {
+ u64 csr;
+ struct {
+ /* FPGA Power consumed, The format is to be defined */
+ u32 pwr_consumed:18;
+ /* FPGA Latency Tolerance Reporting */
+ u8 fpga_latency_report:1;
+ u64 rsvd:45; /* Reserved */
+ };
+ };
+};
+
+/* AP Thresholds */
+struct feature_fme_pm_ap_threshold {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Number of clocks (5ns period) for assertion
+ * of FME_data
+ */
+ u8 threshold1:7;
+ u8 rsvd1:1;
+ u8 threshold2:7;
+ u8 rsvd2:1;
+ u8 threshold1_status:1;
+ u8 threshold2_status:1;
+ u64 rsvd3:46; /* Reserved */
+ };
+ };
+};
+
+/* Xeon Power Limit */
+struct feature_fme_pm_xeon_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+			/* 0 - Turbo range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FPGA Power Limit */
+struct feature_fme_pm_fpga_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+			/* 0 - Turbo range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FME POWER FEATURE */
+struct feature_fme_power {
+ struct feature_header header;
+ struct feature_fme_pm_status status;
+ struct feature_fme_pm_ap_threshold threshold;
+ struct feature_fme_pm_xeon_limit xeon_limit;
+ struct feature_fme_pm_fpga_limit fpga_limit;
+};
+
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+
+enum iperf_cache_events {
+ IPERF_CACHE_RD_HIT,
+ IPERF_CACHE_WR_HIT,
+ IPERF_CACHE_RD_MISS,
+ IPERF_CACHE_WR_MISS,
+ IPERF_CACHE_RSVD, /* reserved */
+ IPERF_CACHE_HOLD_REQ,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN,
+ IPERF_CACHE_TX_REQ_STALL,
+ IPERF_CACHE_RX_REQ_STALL,
+ IPERF_CACHE_EVICTIONS,
+};
+
+/* FPMON Cache Control */
+struct feature_fme_ifpmon_ch_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd1:7; /* Reserved */
+ u8 freeze:1; /* Freeze if set to 1 */
+ u8 rsvd2:7; /* Reserved */
+ u8 cache_event:4; /* Select the cache event */
+ u8 cci_chsel:1; /* Select the channel */
+ u64 rsvd3:43; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Cache Counter */
+struct feature_fme_ifpmon_ch_ctr {
+ union {
+ u64 csr;
+ struct {
+			/* Cache Counter for even addresses */
+ u64 cache_counter:48;
+ u16 rsvd:12; /* Reserved */
+ /* Cache Event being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+enum iperf_fab_events {
+ IPERF_FAB_PCIE0_RD,
+ IPERF_FAB_PCIE0_WR,
+ IPERF_FAB_PCIE1_RD,
+ IPERF_FAB_PCIE1_WR,
+ IPERF_FAB_UPI_RD,
+ IPERF_FAB_UPI_WR,
+ IPERF_FAB_MMIO_RD,
+ IPERF_FAB_MMIO_WR,
+};
+
+#define FAB_DISABLE_FILTER 0
+#define FAB_ENABLE_FILTER 1
+
+/* FPMON FAB Control */
+struct feature_fme_ifpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1; /* Set to 1 to freeze the counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_ifpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_ifpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+enum iperf_vtd_events {
+ IPERF_VTD_AFU_MEM_RD_TRANS,
+ IPERF_VTD_AFU_MEM_WR_TRANS,
+ IPERF_VTD_AFU_DEVTLB_RD_HIT,
+ IPERF_VTD_AFU_DEVTLB_WR_HIT,
+ IPERF_VTD_DEVTLB_4K_FILL,
+ IPERF_VTD_DEVTLB_2M_FILL,
+ IPERF_VTD_DEVTLB_1G_FILL,
+};
+
+/* VT-d control register */
+struct feature_fme_ifpmon_vtd_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1; /* Set to 1 to freeze the counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d event counter */
+struct feature_fme_ifpmon_vtd_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+enum iperf_vtd_sip_events {
+ IPERF_VTD_SIP_IOTLB_4K_HIT,
+ IPERF_VTD_SIP_IOTLB_2M_HIT,
+ IPERF_VTD_SIP_IOTLB_1G_HIT,
+ IPERF_VTD_SIP_SLPWC_L3_HIT,
+ IPERF_VTD_SIP_SLPWC_L4_HIT,
+ IPERF_VTD_SIP_RCC_HIT,
+ IPERF_VTD_SIP_IOTLB_4K_MISS,
+ IPERF_VTD_SIP_IOTLB_2M_MISS,
+ IPERF_VTD_SIP_IOTLB_1G_MISS,
+ IPERF_VTD_SIP_SLPWC_L3_MISS,
+ IPERF_VTD_SIP_SLPWC_L4_MISS,
+ IPERF_VTD_SIP_RCC_MISS,
+};
+
+/* VT-d SIP control register */
+struct feature_fme_ifpmon_vtd_sip_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1; /* Set to 1 to freeze the counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d SIP event counter */
+struct feature_fme_ifpmon_vtd_sip_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+/* FME IPERF FEATURE */
+struct feature_fme_iperf {
+ struct feature_header header;
+ struct feature_fme_ifpmon_ch_ctl ch_ctl;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr0;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr1;
+ struct feature_fme_ifpmon_fab_ctl fab_ctl;
+ struct feature_fme_ifpmon_fab_ctr fab_ctr;
+ struct feature_fme_ifpmon_clk_ctr clk;
+ struct feature_fme_ifpmon_vtd_ctl vtd_ctl;
+ struct feature_fme_ifpmon_vtd_ctr vtd_ctr;
+ struct feature_fme_ifpmon_vtd_sip_ctl vtd_sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr vtd_sip_ctr;
+};
+
+enum dperf_fab_events {
+ DPERF_FAB_PCIE0_RD,
+ DPERF_FAB_PCIE0_WR,
+ DPERF_FAB_MMIO_RD = 6,
+ DPERF_FAB_MMIO_WR,
+};
+
+/* FPMON FAB Control */
+struct feature_fme_dfpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1; /* Set to 1 to freeze the counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_dfpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_dfpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+/* FME DPERF FEATURE */
+struct feature_fme_dperf {
+ struct feature_header header;
+ u64 rsvd[3];
+ struct feature_fme_dfpmon_fab_ctl fab_ctl;
+ struct feature_fme_dfpmon_fab_ctr fab_ctr;
+ struct feature_fme_dfpmon_clk_ctr clk;
+};
+
+struct feature_fme_error0 {
+#define FME_ERROR0_MASK 0xFFUL
+#define FME_ERROR0_MASK_DEFAULT 0x40UL /* pcode workaround */
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_err:1; /* Fabric error */
+ u8 fabfifo_overflow:1; /* Fabric fifo overflow */
+ u8 kticdc_parity_err:2;/* KTI CDC Parity Error */
+ u8 iommu_parity_err:1; /* IOMMU Parity error */
+ /* AFU PF/VF access mismatch detected */
+ u8 afu_acc_mode_err:1;
+ u8 mbp_err:1; /* Indicates an MBP event */
+ /* PCIE0 CDC Parity Error */
+ u8 pcie0cdc_parity_err:5;
+ /* PCIE1 CDC Parity Error */
+ u8 pcie1cdc_parity_err:5;
+ /* CVL CDC Parity Error */
+ u8 cvlcdc_parity_err:3;
+ u64 rsvd:44; /* Reserved */
+ };
+ };
+};
+
+/* PCIe0 Error Status register */
+struct feature_fme_pcie0_error {
+#define FME_PCIE0_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:52; /* Reserved */
+ u8 vfnumb_err:1; /* Number of error VF */
+ u8 funct_type_err:1; /* Virtual (1) or Physical */
+ };
+ };
+};
+
+/* PCIe1 Error Status register */
+struct feature_fme_pcie1_error {
+#define FME_PCIE1_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:54; /* Reserved */
+ };
+ };
+};
+
+/* FME First Error register */
+struct feature_fme_first_error {
+#define FME_FIRST_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Indicates the Error Register that was
+ * triggered first
+ */
+ u64 err_reg_status:60;
+ /*
+ * Holds 60 LSBs from the Error register that was
+ * triggered first
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* FME Next Error register */
+struct feature_fme_next_error {
+#define FME_NEXT_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Indicates the Error Register that was
+ * triggered second
+ */
+ u64 err_reg_status:60;
+ /*
+ * Holds 60 LSBs from the Error register that was
+ * triggered second
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* RAS Non Fatal Error Status register */
+struct feature_fme_ras_nonfaterror {
+ union {
+ u64 csr;
+ struct {
+			/* thermal threshold AP1 */
+			u8 temp_thresh_ap1:1;
+			/* thermal threshold AP2 */
+ u8 temp_thresh_ap2:1;
+ u8 pcie_error:1; /* pcie Error */
+ u8 portfatal_error:1; /* port fatal error */
+ u8 proc_hot:1; /* Indicates a ProcHot event */
+ /* Indicates an AFU PF/VF access mismatch */
+ u8 afu_acc_mode_err:1;
+			/* Injected nonfatal error */
+ u8 injected_nonfata_err:1;
+ u8 rsvd1:2;
+ /* Temperature threshold triggered AP6*/
+ u8 temp_thresh_AP6:1;
+ /* Power threshold triggered AP1 */
+ u8 power_thresh_AP1:1;
+ /* Power threshold triggered AP2 */
+ u8 power_thresh_AP2:1;
+ /* Indicates a MBP event */
+ u8 mbp_err:1;
+ u64 rsvd2:51; /* Reserved */
+ };
+ };
+};
+
+/* RAS Catastrophic Fatal Error Status register */
+struct feature_fme_ras_catfaterror {
+ union {
+ u64 csr;
+ struct {
+ /* KTI Link layer error detected */
+ u8 ktilink_fatal_err:1;
+ /* tag-n-cache error detected */
+ u8 tagcch_fatal_err:1;
+ /* CCI error detected */
+ u8 cci_fatal_err:1;
+ /* KTI Protocol error detected */
+ u8 ktiprpto_fatal_err:1;
+ /* Fatal DRAM error detected */
+ u8 dram_fatal_err:1;
+			/* IOMMU fatal error detected */
+ u8 iommu_fatal_err:1;
+ /* Fabric Fatal Error */
+ u8 fabric_fatal_err:1;
+			/* PCIe poison error */
+ u8 pcie_poison_err:1;
+ /* Injected fatal Error */
+ u8 inject_fata_err:1;
+ /* Catastrophic CRC Error */
+ u8 crc_catast_err:1;
+ /* Catastrophic Thermal Error */
+ u8 therm_catast_err:1;
+ /* Injected Catastrophic Error */
+ u8 injected_catast_err:1;
+ u64 rsvd:52;
+ };
+ };
+};
+
+/* RAS Error injection register */
+struct feature_fme_ras_error_inj {
+#define FME_RAS_ERROR_INJ_MASK 0x7UL
+ union {
+ u64 csr;
+ struct {
+ u8 catast_error:1; /* Catastrophic error flag */
+ u8 fatal_error:1; /* Fatal error flag */
+ u8 nonfatal_error:1; /* NonFatal error flag */
+ u64 rsvd:61; /* Reserved */
+ };
+ };
+};
+
+/* FME error capabilities */
+struct feature_fme_error_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* FME ERR FEATURE */
+struct feature_fme_err {
+ struct feature_header header;
+ struct feature_fme_error0 fme_err_mask;
+ struct feature_fme_error0 fme_err;
+ struct feature_fme_pcie0_error pcie0_err_mask;
+ struct feature_fme_pcie0_error pcie0_err;
+ struct feature_fme_pcie1_error pcie1_err_mask;
+ struct feature_fme_pcie1_error pcie1_err;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ struct feature_fme_ras_nonfaterror ras_nonfat_mask;
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+ struct feature_fme_ras_catfaterror ras_catfat_mask;
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+ struct feature_fme_ras_error_inj ras_error_inj;
+ struct feature_fme_error_capability fme_err_capability;
+};
+
+/* FME Partial Reconfiguration Control */
+struct feature_fme_pr_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 pr_reset:1; /* Reset PR Engine */
+ u8 rsvd3:3; /* Reserved */
+ u8 pr_reset_ack:1; /* Reset PR Engine Ack */
+ u8 rsvd4:3; /* Reserved */
+ u8 pr_regionid:2; /* PR Region ID */
+ u8 rsvd1:2; /* Reserved */
+ u8 pr_start_req:1; /* PR Start Request */
+ u8 pr_push_complete:1; /* PR Data push complete */
+			u8 pr_kind:1; /* PR kind */
+ u32 rsvd:17; /* Reserved */
+ u32 config_data; /* Config data TBD */
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Status */
+struct feature_fme_pr_status {
+ union {
+ u64 csr;
+ struct {
+ u16 pr_credit:9; /* PR Credits */
+ u8 rsvd2:7; /* Reserved */
+ u8 pr_status:1; /* PR status */
+ u8 rsvd:3; /* Reserved */
+			/* Altera PR Controller Block status */
+ u8 pr_controller_status:3;
+ u8 rsvd1:1; /* Reserved */
+ u8 pr_host_status:4; /* PR Host status */
+ u8 rsvd3:4; /* Reserved */
+ /* Security Block Status fields (TBD) */
+ u32 security_bstatus;
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Data */
+struct feature_fme_pr_data {
+ union {
+ u64 csr; /* PR data from the raw-binary file */
+ struct {
+ /* PR data from the raw-binary file */
+ u32 pr_data_raw;
+ u32 rsvd;
+ };
+ };
+};
+
+/* FME PR Public Key */
+struct feature_fme_pr_key {
+ u64 key; /* FME PR Public Hash */
+};
+
+/* FME PR FEATURE */
+struct feature_fme_pr {
+ struct feature_header header;
+	/* Partial Reconfiguration control */
+ struct feature_fme_pr_ctl ccip_fme_pr_control;
+
+ /* Partial Reconfiguration Status */
+ struct feature_fme_pr_status ccip_fme_pr_status;
+
+ /* Partial Reconfiguration data */
+ struct feature_fme_pr_data ccip_fme_pr_data;
+
+	/* Partial Reconfiguration error */
+ u64 ccip_fme_pr_err;
+
+ u64 rsvd1[3];
+
+ /* Partial Reconfiguration data registers */
+ u64 fme_pr_data1;
+ u64 fme_pr_data2;
+ u64 fme_pr_data3;
+ u64 fme_pr_data4;
+ u64 fme_pr_data5;
+ u64 fme_pr_data6;
+ u64 fme_pr_data7;
+ u64 fme_pr_data8;
+
+ u64 rsvd2[5];
+
+ /* PR Interface ID */
+ u64 fme_pr_intfc_id_l;
+ u64 fme_pr_intfc_id_h;
+
+	/* MSI-X field to be added */
+};
+
+/* FME HSSI Control */
+struct feature_fme_hssi_eth_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u16 address:16; /* HSSI address */
+ /*
+			 * HSSI command
+ * 0x0 - No request
+ * 0x08 - SW register RD request
+ * 0x10 - SW register WR request
+			 * 0x40 - Auxiliary bus RD request
+			 * 0x80 - Auxiliary bus WR request
+ */
+ u16 cmd:16;
+ };
+ };
+};
+
+/* FME HSSI Status */
+struct feature_fme_hssi_eth_stat {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u8 acknowledge:1; /* HSSI acknowledge */
+ u8 spare:1; /* HSSI spare */
+ u32 rsvd:30; /* Reserved */
+ };
+ };
+};
+
+/* FME HSSI FEATURE */
+struct feature_fme_hssi {
+ struct feature_header header;
+ struct feature_fme_hssi_eth_ctrl hssi_control;
+ struct feature_fme_hssi_eth_stat hssi_status;
+};
+
+#define PORT_ERR_MASK 0xfff0703ff001f
+struct feature_port_err_key {
+ union {
+ u64 csr;
+ struct {
+ /* Tx Channel0: Overflow */
+ u8 tx_ch0_overflow:1;
+ /* Tx Channel0: Invalid request encoding */
+ u8 tx_ch0_invaldreq :1;
+ /* Tx Channel0: Request with cl_len=3 not supported */
+ u8 tx_ch0_cl_len3:1;
+ /* Tx Channel0: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch0_cl_len2:1;
+ /* Tx Channel0: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch0_cl_len4:1;
+
+ u16 rsvd1:4; /* Reserved */
+
+ /* AFU MMIO RD received while PORT is in reset */
+ u8 mmio_rd_whilerst:1;
+ /* AFU MMIO WR received while PORT is in reset */
+ u8 mmio_wr_whilerst:1;
+
+ u16 rsvd2:5; /* Reserved */
+
+ /* Tx Channel1: Overflow */
+ u8 tx_ch1_overflow:1;
+ /* Tx Channel1: Invalid request encoding */
+ u8 tx_ch1_invaldreq:1;
+ /* Tx Channel1: Request with cl_len=3 not supported */
+ u8 tx_ch1_cl_len3:1;
+ /* Tx Channel1: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch1_cl_len2:1;
+ /* Tx Channel1: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch1_cl_len4:1;
+
+ /* Tx Channel1: Insufficient data payload */
+ u8 tx_ch1_insuff_data:1;
+ /* Tx Channel1: Data payload overrun */
+ u8 tx_ch1_data_overrun:1;
+ /* Tx Channel1 : Incorrect address */
+ u8 tx_ch1_incorr_addr:1;
+ /* Tx Channel1 : NON-Zero SOP Detected */
+ u8 tx_ch1_nzsop:1;
+ /* Tx Channel1 : Illegal VC_SEL, atomic request VLO */
+ u8 tx_ch1_illegal_vcsel:1;
+
+ u8 rsvd3:6; /* Reserved */
+
+ /* MMIO Read Timeout in AFU */
+ u8 mmioread_timeout:1;
+
+ /* Tx Channel2: FIFO Overflow */
+ u8 tx_ch2_fifo_overflow:1;
+
+ /* MMIO read is not matching pending request */
+ u8 unexp_mmio_resp:1;
+
+ u8 rsvd4:5; /* Reserved */
+
+ /* Number of pending Requests: counter overflow */
+ u8 tx_req_counter_overflow:1;
+ /* Req with Address violating SMM Range */
+ u8 llpr_smrr_err:1;
+ /* Req with Address violating second SMM Range */
+ u8 llpr_smrr2_err:1;
+ /* Req with Address violating ME Stolen message */
+ u8 llpr_mesg_err:1;
+ /* Req with Address violating Generic Protected Range */
+ u8 genprot_range_err:1;
+ /* Req with Address violating Legacy Range low */
+ u8 legrange_low_err:1;
+ /* Req with Address violating Legacy Range High */
+ u8 legrange_high_err:1;
+ /* Req with Address violating VGA memory range */
+ u8 vgmem_range_err:1;
+ u8 page_fault_err:1; /* Page fault */
+ u8 pmr_err:1; /* PMR Error */
+ u8 ap6_event:1; /* AP6 event */
+ /* VF FLR detected on Port with PF access control */
+ u8 vfflr_access_err:1;
+ u16 rsvd5:12; /* Reserved */
+ };
+ };
+};
+
+/* Port first error register; does not contain all bits of the error register. */
+struct feature_port_first_err_key {
+ union {
+ u64 csr;
+ struct {
+ u8 tx_ch0_overflow:1;
+ u8 tx_ch0_invaldreq :1;
+ u8 tx_ch0_cl_len3:1;
+ u8 tx_ch0_cl_len2:1;
+ u8 tx_ch0_cl_len4:1;
+ u8 rsvd1:4; /* Reserved */
+ u8 mmio_rd_whilerst:1;
+ u8 mmio_wr_whilerst:1;
+ u8 rsvd2:5; /* Reserved */
+ u8 tx_ch1_overflow:1;
+ u8 tx_ch1_invaldreq:1;
+ u8 tx_ch1_cl_len3:1;
+ u8 tx_ch1_cl_len2:1;
+ u8 tx_ch1_cl_len4:1;
+ u8 tx_ch1_insuff_data:1;
+ u8 tx_ch1_data_overrun:1;
+ u8 tx_ch1_incorr_addr:1;
+ u8 tx_ch1_nzsop:1;
+ u8 tx_ch1_illegal_vcsel:1;
+ u8 rsvd3:6; /* Reserved */
+ u8 mmioread_timeout:1;
+ u8 tx_ch2_fifo_overflow:1;
+ u8 rsvd4:6; /* Reserved */
+ u8 tx_req_counter_overflow:1;
+ u32 rsvd5:23; /* Reserved */
+ };
+ };
+};
+
+/* Port malformed Req0 */
+struct feature_port_malformed_req0 {
+ u64 header_lsb;
+};
+
+/* Port malformed Req1 */
+struct feature_port_malformed_req1 {
+ u64 header_msb;
+};
+
+/* Port debug register */
+struct feature_port_debug {
+ u64 port_debug;
+};
+
+/* Port error capabilities */
+struct feature_port_err_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE ERROR */
+struct feature_port_error {
+ struct feature_header header;
+ struct feature_port_err_key error_mask;
+ struct feature_port_err_key port_error;
+ struct feature_port_first_err_key port_first_error;
+ struct feature_port_malformed_req0 malreq0;
+ struct feature_port_malformed_req1 malreq1;
+ struct feature_port_debug port_debug;
+ struct feature_port_err_capability error_capability;
+};
+
+/* Port UMSG Capability */
+struct feature_port_umsg_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Number of umsg allocated to this port */
+ u8 umsg_allocated;
+ /* Enable / Disable UMsg engine for this port */
+ u8 umsg_enable:1;
+			/* UMsg initialization status */
+ u8 umsg_init_complete:1;
+			/* IOMMU cannot translate the umsg base address */
+ u8 umsg_trans_error:1;
+ u64 rsvd:53; /* Reserved */
+ };
+ };
+};
+
+/* Port UMSG base address */
+struct feature_port_umsg_baseaddr {
+ union {
+ u64 csr;
+ struct {
+ u64 base_addr:48; /* 48 bit physical address */
+ u16 rsvd; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_umsg_mode {
+ union {
+ u64 csr;
+ struct {
+ u32 umsg_hint_enable; /* UMSG hint enable/disable */
+ u32 rsvd; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE UMSG */
+struct feature_port_umsg {
+ struct feature_header header;
+ struct feature_port_umsg_cap capability;
+ struct feature_port_umsg_baseaddr baseaddr;
+ struct feature_port_umsg_mode mode;
+};
+
+#define UMSG_EN_POLL_INVL 10 /* us */
+#define UMSG_EN_POLL_TIMEOUT 1000 /* us */
+
+/* Port UINT Capability */
+struct feature_port_uint_cap {
+ union {
+ u64 csr;
+ struct {
+ u16 intr_num:12; /* Supported interrupts num */
+ /* First MSI-X vector table entry number */
+ u16 first_vec_num:12;
+ u64 rsvd:40;
+ };
+ };
+};
+
+/* PORT FEATURE UINT */
+struct feature_port_uint {
+ struct feature_header header;
+ struct feature_port_uint_cap capability;
+};
+
+/* STP region supports mmap operation, so use page aligned size. */
+#define PORT_FEATURE_STP_REGION_SIZE \
+ IFPGA_PAGE_ALIGN(sizeof(struct feature_port_stp))
+
+/* Port STP status register (for debug only)*/
+struct feature_port_stp_status {
+ union {
+ u64 csr;
+ struct {
+ /* SLD Hub end-point read/write timeout */
+ u8 sld_ep_timeout:1;
+ /* Remote STP in reset/disable */
+ u8 rstp_disabled:1;
+ u8 unsupported_read:1;
+ /* MMIO timeout detected and faked with a response */
+ u8 mmio_timeout:1;
+ u8 txfifo_count:4;
+ u8 rxfifo_count:4;
+ u8 txfifo_overflow:1;
+ u8 txfifo_underflow:1;
+ u8 rxfifo_overflow:1;
+ u8 rxfifo_underflow:1;
+ /* Number of MMIO write requests */
+ u16 write_requests;
+ /* Number of MMIO read requests */
+ u16 read_requests;
+ /* Number of MMIO read responses */
+ u16 read_responses;
+ };
+ };
+};
+
+/*
+ * PORT FEATURE STP
+ * Most registers in the STP region are not touched by the driver but are
+ * mmapped to user space. They are therefore not defined in the data structure
+ * below, whose actual size is 0x18c per the spec.
+ */
+struct feature_port_stp {
+ struct feature_header header;
+ struct feature_port_stp_status stp_status;
+};
+
+/**
+ * enum fpga_pr_states - fpga PR states
+ * @FPGA_PR_STATE_UNKNOWN: can't determine state
+ * @FPGA_PR_STATE_WRITE_INIT: preparing FPGA for programming
+ * @FPGA_PR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
+ * @FPGA_PR_STATE_WRITE: writing image to FPGA
+ * @FPGA_PR_STATE_WRITE_ERR: Error while writing FPGA
+ * @FPGA_PR_STATE_WRITE_COMPLETE: Doing post programming steps
+ * @FPGA_PR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE
+ * @FPGA_PR_STATE_OPERATING: FPGA PR done
+ */
+enum fpga_pr_states {
+	/* cannot determine state */
+ FPGA_PR_STATE_UNKNOWN,
+
+ /* write sequence: init, write, complete */
+ FPGA_PR_STATE_WRITE_INIT,
+ FPGA_PR_STATE_WRITE_INIT_ERR,
+ FPGA_PR_STATE_WRITE,
+ FPGA_PR_STATE_WRITE_ERR,
+ FPGA_PR_STATE_WRITE_COMPLETE,
+ FPGA_PR_STATE_WRITE_COMPLETE_ERR,
+
+ /* FPGA PR done */
+ FPGA_PR_STATE_DONE,
+};
+
+/*
+ * FPGA Manager flags
+ * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ */
+#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
+
+/**
+ * struct fpga_pr_info - specific information to a FPGA PR
+ * @flags: boolean flags as defined above
+ * @pr_err: PR error code
+ * @state: fpga manager state
+ * @port_id: port id
+ */
+struct fpga_pr_info {
+ u32 flags;
+ u64 pr_err;
+ enum fpga_pr_states state;
+ int port_id;
+};
+
+#define DEFINE_FPGA_PR_ERR_MSG(_name_) \
+static const char * const _name_[] = { \
+ "PR operation error detected", \
+ "PR CRC error detected", \
+	"PR incompatible bitstream error detected", \
+ "PR IP protocol error detected", \
+ "PR FIFO overflow error detected", \
+ "PR timeout error detected", \
+ "PR secure load error detected", \
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+#define PR_WAIT_TIMEOUT 15000000
+
+#define PR_HOST_STATUS_IDLE 0
+#define PR_MAX_ERR_NUM 7
+
+DEFINE_FPGA_PR_ERR_MSG(pr_err_msg);
+
+/*
+ * green bitstream header must be byte-packed to match the
+ * real file format.
+ */
+struct bts_header {
+ u64 guid_h;
+ u64 guid_l;
+ u32 metadata_len;
+};
+
+#define GBS_GUID_H 0x414750466e6f6558
+#define GBS_GUID_L 0x31303076534247b7
+#define is_valid_bts(bts_hdr) \
+ (((bts_hdr)->guid_h == GBS_GUID_H) && \
+ ((bts_hdr)->guid_l == GBS_GUID_L))
+
+#pragma pack(pop)
+#endif /* _IFPGA_DEFINES_H_ */
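
Since the green bitstream header and the is_valid_bts() check are defined at
the end of this file, a small sketch of how a buffer could be sanity-checked
before partial reconfiguration may be useful. It is illustrative only and not
part of the patch; the helper name and the payload-offset arithmetic are
assumptions for illustration.

    /* Illustrative only, not part of the patch: sanity-check a green
     * bitstream buffer against the GBS GUID before handing it to PR,
     * using struct bts_header and is_valid_bts() from this header.
     */
    static int check_gbs_buffer(const void *buf, u32 size)
    {
    	const struct bts_header *bts = buf;

    	if (size <= sizeof(*bts))
    		return -EINVAL;

    	if (!is_valid_bts(bts))
    		return -EINVAL;	/* GUID does not match a green bitstream */

    	/* metadata_len bytes of metadata follow the header; the raw
    	 * bitstream data pushed through the PR engine comes after that.
    	 */
    	if (size < sizeof(*bts) + bts->metadata_len)
    		return -EINVAL;

    	return 0;
    }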
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
new file mode 100644
index 00000000..f0939dc3
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
@@ -0,0 +1,821 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "ifpga_api.h"
+
+#include "ifpga_hw.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+struct build_feature_devs_info {
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_afu_info *acc_info;
+
+ void *fiu;
+ enum fpga_id_type current_type;
+ int current_port_id;
+
+ void *ioaddr;
+ void *ioend;
+ uint64_t phys_addr;
+ int current_bar;
+
+ void *pfme_hdr;
+
+ struct ifpga_hw *hw;
+};
+
+struct feature_info {
+ const char *name;
+ u32 resource_size;
+ int feature_index;
+ int revision_id;
+ unsigned int vec_start;
+ unsigned int vec_cnt;
+
+ struct feature_ops *ops;
+};
+
+/* indexed by fme feature IDs which are defined in 'enum fme_feature_id'. */
+static struct feature_info fme_features[] = {
+ {
+ .name = FME_FEATURE_HEADER,
+ .resource_size = sizeof(struct feature_fme_header),
+ .feature_index = FME_FEATURE_ID_HEADER,
+ .revision_id = FME_HEADER_REVISION,
+ .ops = &fme_hdr_ops,
+ },
+ {
+ .name = FME_FEATURE_THERMAL_MGMT,
+ .resource_size = sizeof(struct feature_fme_thermal),
+ .feature_index = FME_FEATURE_ID_THERMAL_MGMT,
+ .revision_id = FME_THERMAL_MGMT_REVISION,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_POWER_MGMT,
+ .resource_size = sizeof(struct feature_fme_power),
+ .feature_index = FME_FEATURE_ID_POWER_MGMT,
+ .revision_id = FME_POWER_MGMT_REVISION,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_IPERF,
+ .resource_size = sizeof(struct feature_fme_iperf),
+ .feature_index = FME_FEATURE_ID_GLOBAL_IPERF,
+ .revision_id = FME_GLOBAL_IPERF_REVISION,
+ .ops = &fme_global_iperf_ops,
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_ERR,
+ .resource_size = sizeof(struct feature_fme_err),
+ .feature_index = FME_FEATURE_ID_GLOBAL_ERR,
+ .revision_id = FME_GLOBAL_ERR_REVISION,
+ .ops = &fme_global_err_ops,
+ },
+ {
+ .name = FME_FEATURE_PR_MGMT,
+ .resource_size = sizeof(struct feature_fme_pr),
+ .feature_index = FME_FEATURE_ID_PR_MGMT,
+ .revision_id = FME_PR_MGMT_REVISION,
+ .ops = &fme_pr_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_HSSI_ETH,
+ .resource_size = sizeof(struct feature_fme_hssi),
+ .feature_index = FME_FEATURE_ID_HSSI_ETH,
+ .revision_id = FME_HSSI_ETH_REVISION
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_DPERF,
+ .resource_size = sizeof(struct feature_fme_dperf),
+ .feature_index = FME_FEATURE_ID_GLOBAL_DPERF,
+ .revision_id = FME_GLOBAL_DPERF_REVISION,
+ .ops = &fme_global_dperf_ops,
+ }
+};
+
+static struct feature_info port_features[] = {
+ {
+ .name = PORT_FEATURE_HEADER,
+ .resource_size = sizeof(struct feature_port_header),
+ .feature_index = PORT_FEATURE_ID_HEADER,
+ .revision_id = PORT_HEADER_REVISION,
+ .ops = &port_hdr_ops,
+ },
+ {
+ .name = PORT_FEATURE_ERR,
+ .resource_size = sizeof(struct feature_port_error),
+ .feature_index = PORT_FEATURE_ID_ERROR,
+ .revision_id = PORT_ERR_REVISION,
+ .ops = &port_error_ops,
+ },
+ {
+ .name = PORT_FEATURE_UMSG,
+ .resource_size = sizeof(struct feature_port_umsg),
+ .feature_index = PORT_FEATURE_ID_UMSG,
+ .revision_id = PORT_UMSG_REVISION,
+ },
+ {
+ .name = PORT_FEATURE_UINT,
+ .resource_size = sizeof(struct feature_port_uint),
+ .feature_index = PORT_FEATURE_ID_UINT,
+ .revision_id = PORT_UINT_REVISION,
+ .ops = &port_uint_ops,
+ },
+ {
+ .name = PORT_FEATURE_STP,
+ .resource_size = PORT_FEATURE_STP_REGION_SIZE,
+ .feature_index = PORT_FEATURE_ID_STP,
+ .revision_id = PORT_STP_REVISION,
+ .ops = &port_stp_ops,
+ },
+ {
+ .name = PORT_FEATURE_UAFU,
+ /* UAFU feature size should be read from PORT_CAP.MMIOSIZE.
+		 * The uafu feature size will be set while parsing the port device.
+ */
+ .resource_size = 0,
+ .feature_index = PORT_FEATURE_ID_UAFU,
+ .revision_id = PORT_UAFU_REVISION
+ },
+};
+
+static u64 feature_id(void __iomem *start)
+{
+ struct feature_header header;
+
+ header.csr = readq(start);
+
+ switch (header.type) {
+ case FEATURE_TYPE_FIU:
+ return FEATURE_ID_HEADER;
+ case FEATURE_TYPE_PRIVATE:
+ return header.id;
+ case FEATURE_TYPE_AFU:
+ return FEATURE_ID_AFU;
+ }
+
+ WARN_ON(1);
+ return 0;
+}
+
+static int
+build_info_add_sub_feature(struct build_feature_devs_info *binfo,
+ struct feature_info *finfo, void __iomem *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct feature *feature = NULL;
+ int feature_idx = finfo->feature_index;
+ unsigned int vec_start = finfo->vec_start;
+ unsigned int vec_cnt = finfo->vec_cnt;
+ struct feature_irq_ctx *ctx = NULL;
+ int port_id, ret = 0;
+ unsigned int i;
+
+ if (binfo->current_type == FME_ID) {
+ feature = &hw->fme.sub_feature[feature_idx];
+ feature->parent = &hw->fme;
+ } else if (binfo->current_type == PORT_ID) {
+ port_id = binfo->current_port_id;
+ feature = &hw->port[port_id].sub_feature[feature_idx];
+ feature->parent = &hw->port[port_id];
+ } else {
+ return -EFAULT;
+ }
+
+ feature->state = IFPGA_FEATURE_ATTACHED;
+ feature->addr = start;
+ feature->id = feature_id(start);
+ feature->size = finfo->resource_size;
+ feature->name = finfo->name;
+ feature->revision = finfo->revision_id;
+ feature->ops = finfo->ops;
+ feature->phys_addr = binfo->phys_addr +
+ ((u8 *)start - (u8 *)binfo->ioaddr);
+
+ if (vec_cnt) {
+ if (vec_start + vec_cnt <= vec_start)
+ return -EINVAL;
+
+ ctx = zmalloc(sizeof(*ctx) * vec_cnt);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < vec_cnt; i++) {
+ ctx[i].eventfd = -1;
+ ctx[i].idx = vec_start + i;
+ }
+ }
+
+ feature->ctx = ctx;
+ feature->ctx_num = vec_cnt;
+ feature->vfio_dev_fd = binfo->pci_data->vfio_dev_fd;
+
+ return ret;
+}
+
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ void __iomem *start, struct feature_info *finfo)
+{
+ struct feature_header *hdr = start;
+
+ if (finfo->revision_id != SKIP_REVISION_CHECK &&
+ hdr->revision > finfo->revision_id) {
+		dev_err(binfo, "feature %s revision mismatch: default %x, now at %x.\n",
+			finfo->name, finfo->revision_id, hdr->revision);
+ }
+
+ return build_info_add_sub_feature(binfo, finfo, start);
+}
+
+/*
+ * The UAFU GUID is dynamic, as it can change after the FME downloads a
+ * different Green Bitstream to the port, so any unknown GUID attached to a
+ * port's feature list is treated as a UAFU.
+ */
+static bool feature_is_UAFU(struct build_feature_devs_info *binfo)
+{
+ if (binfo->current_type != PORT_ID)
+ return false;
+
+ return true;
+}
+
+static int parse_feature_port_uafu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+ struct ifpga_afu_info *info;
+ void *start = (void *)hdr;
+ int ret;
+
+ if (port_features[id].resource_size) {
+ ret = create_feature_instance(binfo, hdr, &port_features[id]);
+ } else {
+ dev_err(binfo, "the uafu feature header is mis-configured.\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* FIXME: need to figure out a better name */
+ info = malloc(sizeof(*info));
+ if (!info)
+ return -ENOMEM;
+
+ info->region[0].addr = start;
+ info->region[0].phys_addr = binfo->phys_addr +
+ (uint8_t *)start - (uint8_t *)binfo->ioaddr;
+ info->region[0].len = port_features[id].resource_size;
+ port_features[id].resource_size = 0;
+ info->num_regions = 1;
+
+ binfo->acc_info = info;
+
+ return ret;
+}
+
+static int parse_feature_afus(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ int ret;
+ struct feature_afu_header *afu_hdr, header;
+ u8 __iomem *start;
+ u8 __iomem *end = binfo->ioend;
+
+ start = (u8 __iomem *)hdr;
+ for (; start < end; start += header.next_afu) {
+ if ((unsigned int)(end - start) <
+ (unsigned int)(sizeof(*afu_hdr) + sizeof(*hdr)))
+ return -EINVAL;
+
+ hdr = (struct feature_header *)start;
+ afu_hdr = (struct feature_afu_header *)(hdr + 1);
+ header.csr = readq(&afu_hdr->csr);
+
+ if (feature_is_UAFU(binfo)) {
+ ret = parse_feature_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+ }
+
+ if (!header.next_afu)
+ break;
+ }
+
+ return 0;
+}
+
+/* create and register proper private data */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct ifpga_afu_info *info = binfo->acc_info;
+ struct ifpga_hw *hw = binfo->hw;
+ struct opae_manager *mgr;
+ struct opae_bridge *br;
+ struct opae_accelerator *acc;
+
+ if (!binfo->fiu)
+ return 0;
+
+ if (binfo->current_type == PORT_ID) {
+ /* return error if no valid acc info data structure */
+ if (!info)
+ return -EFAULT;
+
+ br = opae_bridge_alloc(hw->adapter->name, &ifpga_br_ops,
+ binfo->fiu);
+ if (!br)
+ return -ENOMEM;
+
+ br->id = binfo->current_port_id;
+
+ /* update irq info */
+ info->num_irqs = port_features[PORT_FEATURE_ID_UINT].vec_cnt;
+
+ acc = opae_accelerator_alloc(hw->adapter->name,
+ &ifpga_acc_ops, info);
+ if (!acc) {
+ opae_bridge_free(br);
+ return -ENOMEM;
+ }
+
+ acc->br = br;
+ acc->index = br->id;
+
+ opae_adapter_add_acc(hw->adapter, acc);
+
+ } else if (binfo->current_type == FME_ID) {
+ mgr = opae_manager_alloc(hw->adapter->name, &ifpga_mgr_ops,
+ binfo->fiu);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->adapter = hw->adapter;
+ hw->adapter->mgr = mgr;
+ }
+
+ binfo->fiu = NULL;
+
+ return 0;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum fpga_id_type type, unsigned int index)
+{
+ int ret;
+
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+ binfo->current_type = type;
+
+ if (type == FME_ID) {
+ binfo->fiu = &binfo->hw->fme;
+ } else if (type == PORT_ID) {
+ binfo->fiu = &binfo->hw->port[index];
+ binfo->current_port_id = index;
+ }
+
+ return 0;
+}
+
+static int parse_feature_fme(struct build_feature_devs_info *binfo,
+ struct feature_header *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ int ret;
+
+ ret = build_info_create_dev(binfo, FME_ID, 0);
+ if (ret)
+ return ret;
+
+ /* Update FME states */
+ fme->state = IFPGA_FME_IMPLEMENTED;
+ fme->parent = hw;
+ spinlock_init(&fme->lock);
+
+ return create_feature_instance(binfo, start,
+ &fme_features[FME_FEATURE_ID_HEADER]);
+}
+
+static int parse_feature_port(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_port_hw *port;
+ unsigned int port_id;
+ int ret;
+
+ /* Get current port's id */
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
+ port_id = capability.port_number;
+
+ ret = build_info_create_dev(binfo, PORT_ID, port_id);
+ if (ret)
+ return ret;
+
+	/* Found a Port device */
+ port = &hw->port[port_id];
+ port->port_id = binfo->current_port_id;
+ port->parent = hw;
+ port->state = IFPGA_PORT_ATTACHED;
+ spinlock_init(&port->lock);
+
+ return create_feature_instance(binfo, start,
+ &port_features[PORT_FEATURE_ID_HEADER]);
+}
+
+static void enable_port_uafu(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_port_hw *port = &binfo->hw->port[binfo->current_port_id];
+
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
+ port_features[id].resource_size = (capability.mmio_size << 10);
+
+	/*
+	 * Per the spec, the related port must be reset to enable the UAFU;
+	 * otherwise the whole MMIO space of this UAFU will be invalid.
+	 */
+ if (port_features[id].resource_size)
+ fpga_port_reset(port);
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ struct feature_fiu_header *fiu_hdr, fiu_header;
+ u8 __iomem *start = (u8 __iomem *)hdr;
+ int ret;
+
+ header.csr = readq(hdr);
+
+ switch (header.id) {
+ case FEATURE_FIU_ID_FME:
+ ret = parse_feature_fme(binfo, hdr);
+ binfo->pfme_hdr = hdr;
+ if (ret)
+ return ret;
+ break;
+ case FEATURE_FIU_ID_PORT:
+ ret = parse_feature_port(binfo, hdr);
+ enable_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+
+ /* Check Port FIU's next_afu pointer to User AFU DFH */
+ fiu_hdr = (struct feature_fiu_header *)(hdr + 1);
+ fiu_header.csr = readq(&fiu_hdr->csr);
+
+ if (fiu_header.next_afu) {
+ start += fiu_header.next_afu;
+ ret = parse_feature_afus(binfo,
+ (struct feature_header *)start);
+ if (ret)
+ return ret;
+ } else {
+ dev_info(binfo, "No AFUs detected on Port\n");
+ }
+
+ break;
+ default:
+ dev_info(binfo, "FIU TYPE %d is not supported yet.\n",
+ header.id);
+ }
+
+ return 0;
+}
+
+static void parse_feature_irqs(struct build_feature_devs_info *binfo,
+ void __iomem *start, struct feature_info *finfo)
+{
+ finfo->vec_start = 0;
+ finfo->vec_cnt = 0;
+
+ UNUSED(binfo);
+
+ if (!strcmp(finfo->name, PORT_FEATURE_UINT)) {
+ struct feature_port_uint *port_uint = start;
+ struct feature_port_uint_cap uint_cap;
+
+ uint_cap.csr = readq(&port_uint->capability);
+ if (uint_cap.intr_num) {
+ finfo->vec_start = uint_cap.first_vec_num;
+ finfo->vec_cnt = uint_cap.intr_num;
+ } else {
+ dev_debug(binfo, "UAFU doesn't support interrupt\n");
+ }
+ } else if (!strcmp(finfo->name, PORT_FEATURE_ERR)) {
+ struct feature_port_error *port_err = start;
+ struct feature_port_err_capability port_err_cap;
+
+ port_err_cap.csr = readq(&port_err->error_capability);
+ if (port_err_cap.support_intr) {
+ finfo->vec_start = port_err_cap.intr_vector_num;
+ finfo->vec_cnt = 1;
+ } else {
+			dev_debug(binfo, "Port error doesn't support interrupt\n");
+ }
+
+ } else if (!strcmp(finfo->name, FME_FEATURE_GLOBAL_ERR)) {
+ struct feature_fme_err *fme_err = start;
+ struct feature_fme_error_capability fme_err_cap;
+
+ fme_err_cap.csr = readq(&fme_err->fme_err_capability);
+ if (fme_err_cap.support_intr) {
+ finfo->vec_start = fme_err_cap.intr_vector_num;
+ finfo->vec_cnt = 1;
+ } else {
+			dev_debug(binfo, "FME error doesn't support interrupt\n");
+ }
+ }
+}
+
+static int parse_feature_fme_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ if (header.id >= ARRAY_SIZE(fme_features)) {
+ dev_err(binfo, "FME feature id %x is not supported yet.\n",
+ header.id);
+ return 0;
+ }
+
+ parse_feature_irqs(binfo, hdr, &fme_features[header.id]);
+
+ return create_feature_instance(binfo, hdr, &fme_features[header.id]);
+}
+
+static int parse_feature_port_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ enum port_feature_id id;
+
+ header.csr = readq(hdr);
+	/*
+	 * Port private feature ids occupy the range [0x10, 0x13]; add 1 so
+	 * that index 0 stays reserved for the port header (port-hdr).
+	 */
+ id = (header.id & 0x000f) + 1;
+
+ if (id >= ARRAY_SIZE(port_features)) {
+ dev_err(binfo, "Port feature id %x is not supported yet.\n",
+ header.id);
+ return 0;
+ }
+
+ parse_feature_irqs(binfo, hdr, &port_features[id]);
+
+ return create_feature_instance(binfo, hdr, &port_features[id]);
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ switch (binfo->current_type) {
+ case FME_ID:
+ return parse_feature_fme_private(binfo, hdr);
+ case PORT_ID:
+ return parse_feature_port_private(binfo, hdr);
+ default:
+ dev_err(binfo, "private feature %x belonging to AFU %d (unknown_type) is not supported yet.\n",
+ header.id, binfo->current_type);
+ }
+ return 0;
+}
+
+static int parse_feature(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ int ret = 0;
+
+ header.csr = readq(hdr);
+
+ switch (header.type) {
+ case FEATURE_TYPE_AFU:
+ ret = parse_feature_afus(binfo, hdr);
+ break;
+ case FEATURE_TYPE_PRIVATE:
+ ret = parse_feature_private(binfo, hdr);
+ break;
+ case FEATURE_TYPE_FIU:
+ ret = parse_feature_fiu(binfo, hdr);
+ break;
+ default:
+ dev_err(binfo, "Feature Type %x is not supported.\n",
+ hdr->type);
+ };
+
+ return ret;
+}
+
+static int
+parse_feature_list(struct build_feature_devs_info *binfo, u8 __iomem *start)
+{
+ struct feature_header *hdr, header;
+ u8 __iomem *end = (u8 __iomem *)binfo->ioend;
+ int ret = 0;
+
+ for (; start < end; start += header.next_header_offset) {
+ if ((unsigned int)(end - start) < (unsigned int)sizeof(*hdr)) {
+ dev_err(binfo, "The region is too small to contain a feature.\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ hdr = (struct feature_header *)start;
+ ret = parse_feature(binfo, hdr);
+ if (ret)
+ return ret;
+
+ header.csr = readq(hdr);
+ if (!header.next_header_offset)
+ break;
+ }
+
+ return build_info_commit_dev(binfo);
+}
+
+/* switch the memory mapping to BAR# @bar */
+static int parse_switch_to(struct build_feature_devs_info *binfo, int bar)
+{
+ struct opae_adapter_data_pci *pci_data = binfo->pci_data;
+
+ if (!pci_data->region[bar].addr)
+ return -ENOMEM;
+
+ binfo->ioaddr = pci_data->region[bar].addr;
+ binfo->ioend = (u8 __iomem *)binfo->ioaddr + pci_data->region[bar].len;
+ binfo->phys_addr = pci_data->region[bar].phys_addr;
+ binfo->current_bar = bar;
+
+ return 0;
+}
+
+static int parse_ports_from_fme(struct build_feature_devs_info *binfo)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_port port;
+ int i = 0, ret = 0;
+
+ if (!binfo->pfme_hdr) {
+ dev_info(binfo, "VF is detected.\n");
+ return ret;
+ }
+
+ fme_hdr = binfo->pfme_hdr;
+
+ do {
+ port.csr = readq(&fme_hdr->port[i]);
+ if (!port.port_implemented)
+ break;
+
+		/* skip ports that can only be accessed via a VF */
+ if (port.afu_access_control == FME_AFU_ACCESS_VF)
+ continue;
+
+ ret = parse_switch_to(binfo, port.port_bar);
+ if (ret)
+ break;
+
+ ret = parse_feature_list(binfo,
+ (u8 __iomem *)binfo->ioaddr +
+ port.port_offset);
+ if (ret)
+ break;
+ } while (++i < MAX_FPGA_PORT_NUM);
+
+ return ret;
+}
+
+static struct build_feature_devs_info *
+build_info_alloc_and_init(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+
+ binfo = zmalloc(sizeof(*binfo));
+ if (!binfo)
+ return binfo;
+
+ binfo->hw = hw;
+ binfo->pci_data = hw->pci_data;
+
+ /* fpga feature list starts from BAR 0 */
+ if (parse_switch_to(binfo, 0)) {
+ free(binfo);
+ return NULL;
+ }
+
+ return binfo;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ free(binfo);
+}
+
+static void ifpga_print_device_feature_list(struct ifpga_hw *hw)
+{
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct ifpga_port_hw *port;
+ struct feature *feature;
+ int i, j;
+
+ dev_info(hw, "found fme_device, is in PF: %s\n",
+ is_ifpga_hw_pf(hw) ? "yes" : "no");
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: 0x%p - 0x%p - paddr: 0x%lx\n",
+ feature->name, feature->addr,
+ feature->addr + feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++) {
+ port = &hw->port[i];
+
+ if (port->state != IFPGA_PORT_ATTACHED)
+ continue;
+
+ dev_info(hw, "port device: %d\n", port->port_id);
+
+ for (j = 0; j < PORT_FEATURE_ID_MAX; j++) {
+ feature = &port->sub_feature[j];
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: 0x%p - 0x%p - paddr:0x%lx\n",
+ feature->name,
+ feature->addr,
+ feature->addr +
+ feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+ }
+}
+
+int ifpga_bus_enumerate(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+ int ret;
+
+ binfo = build_info_alloc_and_init(hw);
+ if (!binfo)
+ return -ENOMEM;
+
+ ret = parse_feature_list(binfo, binfo->ioaddr);
+ if (ret)
+ goto exit;
+
+ ret = parse_ports_from_fme(binfo);
+ if (ret)
+ goto exit;
+
+ ifpga_print_device_feature_list(hw);
+
+exit:
+ build_info_free(binfo);
+ return ret;
+}
+
+int ifpga_bus_init(struct ifpga_hw *hw)
+{
+ int i;
+
+ fme_hw_init(&hw->fme);
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++)
+ port_hw_init(&hw->port[i]);
+
+ return 0;
+}
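
For readers following the enumeration flow above, the whole pass is a walk of the Device Feature Header (DFH) linked list: each header carries a type, an id and a next_header_offset, and a zero offset (or the end of the BAR) terminates the list. Below is a minimal, self-contained sketch of that walk; the bit layout is illustrative only (the real one is struct feature_header in ifpga_defines.h) and none of these names belong to the driver.

/*
 * Illustrative DFH walk, mirroring parse_feature_list(). The field widths
 * below are assumptions for the sketch, not the driver's actual layout.
 */
#include <stdint.h>
#include <stdio.h>

struct dfh_example {
	uint64_t id:12;
	uint64_t revision:4;
	uint64_t next:24;	/* next_header_offset, in bytes */
	uint64_t rsvd:19;
	uint64_t eol:1;
	uint64_t type:4;
};

static void walk_dfh_example(uint8_t *bar, size_t len)
{
	uint8_t *p = bar;

	while (p + sizeof(struct dfh_example) <= bar + len) {
		struct dfh_example *hdr = (struct dfh_example *)p;

		printf("feature type %u id %u at offset %zu\n",
		       (unsigned int)hdr->type, (unsigned int)hdr->id,
		       (size_t)(p - bar));

		if (!hdr->next)	/* a zero offset terminates the list */
			break;
		p += hdr->next;
	}
}
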
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
new file mode 100644
index 00000000..14131e32
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_ENUMERATE_H_
+#define _IFPGA_ENUMERATE_H_
+
+int ifpga_bus_init(struct ifpga_hw *hw);
+int ifpga_bus_enumerate(struct ifpga_hw *hw);
+
+#endif /* _IFPGA_ENUMERATE_H_ */
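
These two entry points are what the rest of the driver is expected to call once an ifpga_hw instance has its PCI data and adapter filled in: enumerate first, then init. A hedged caller sketch follows (the real wiring is done elsewhere in the ifpga_rawdev PMD; the function name here is hypothetical):

/* Hypothetical probe helper; assumes ifpga_hw.h and this header are included
 * and that hw->pci_data / hw->adapter were populated by the caller. */
static int example_ifpga_probe(struct ifpga_hw *hw)
{
	int ret;

	ret = ifpga_bus_enumerate(hw);	/* discover FME/Port sub-features */
	if (ret)
		return ret;

	return ifpga_bus_init(hw);	/* run each feature's ops->init() */
}
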
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
new file mode 100644
index 00000000..be7ac9ee
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <sys/ioctl.h>
+
+#include "ifpga_feature_dev.h"
+
+/*
+ * Enable the port by clearing the port soft reset bit, which is set by
+ * default. The AFU cannot respond to any MMIO access while in reset.
+ * __fpga_port_enable() should only be called after __fpga_port_disable().
+ */
+void __fpga_port_enable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ WARN_ON(!port->disable_count);
+
+ if (--port->disable_count != 0)
+ return;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x0;
+ writeq(control.csr, &port_hdr->control);
+}
+
+int __fpga_port_disable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ if (port->disable_count++ != 0)
+ return 0;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ /* Set port soft reset */
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x1;
+ writeq(control.csr, &port_hdr->control);
+
+	/*
+	 * HW sets the ack bit to 1 once all outstanding requests on this port
+	 * have been drained and the minimum soft reset pulse width has
+	 * elapsed. The driver polls port_soft_reset_ack to determine whether
+	 * HW has completed the reset.
+	 */
+ control.port_sftrst_ack = 1;
+
+ if (fpga_wait_register_field(port_sftrst_ack, control,
+ &port_hdr->control, RST_POLL_TIMEOUT,
+ RST_POLL_INVL)) {
+ dev_err(port, "timeout, fail to reset device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
+{
+ struct feature_port_header *port_hdr;
+ u64 guidl, guidh;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
+
+ spinlock_lock(&port->lock);
+ guidl = readq(&port_hdr->afu_header.guid.b[0]);
+ guidh = readq(&port_hdr->afu_header.guid.b[8]);
+ spinlock_unlock(&port->lock);
+
+ memcpy(uuid->b, &guidl, sizeof(u64));
+ memcpy(uuid->b + 8, &guidh, sizeof(u64));
+
+ return 0;
+}
+
+/* Mask / Unmask Port Errors by the Error Mask register. */
+void port_err_mask(struct ifpga_port_hw *port, bool mask)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key err_mask;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ if (mask)
+ err_mask.csr = PORT_ERR_MASK;
+ else
+ err_mask.csr = 0;
+
+ writeq(err_mask.csr, &port_err->error_mask);
+}
+
+/* Clear All Port Errors. */
+int port_err_clear(struct ifpga_port_hw *port, u64 err)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_error *port_err;
+ struct feature_port_err_key mask;
+ struct feature_port_first_err_key first;
+ struct feature_port_status status;
+ int ret = 0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ /*
+ * Clear All Port Errors
+ *
+ * - Check for AP6 State
+ * - Halt Port by keeping Port in reset
+ * - Set PORT Error mask to all 1 to mask errors
+ * - Clear all errors
+ * - Set Port mask to all 0 to enable errors
+	 * - All error registers start capturing new errors
+ * - Enable Port by pulling the port out of reset
+ */
+
+	/* If the device is still in AP6 state, no errors can be cleared. */
+ status.csr = readq(&port_hdr->status);
+ if (status.power_state == PORT_POWER_STATE_AP6) {
+		dev_err(port, "Could not clear errors, device in AP6 state.\n");
+ return -EBUSY;
+ }
+
+ /* Halt Port by keeping Port in reset */
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ /* Mask all errors */
+ port_err_mask(port, true);
+
+	/* Clear errors only if the err input matches the current port errors. */
+ mask.csr = readq(&port_err->port_error);
+
+ if (mask.csr == err) {
+ writeq(mask.csr, &port_err->port_error);
+
+ first.csr = readq(&port_err->port_first_error);
+ writeq(first.csr, &port_err->port_first_error);
+ } else {
+ ret = -EBUSY;
+ }
+
+ /* Clear mask */
+ port_err_mask(port, false);
+
+	/* Enable the port by clearing the reset */
+ __fpga_port_enable(port);
+
+ return ret;
+}
+
+int port_clear_error(struct ifpga_port_hw *port)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+
+ dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
+
+ return port_err_clear(port, error.csr);
+}
+
+void fme_hw_uinit(struct ifpga_fme_hw *fme)
+{
+ struct feature *feature;
+ int i;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return;
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+int fme_hw_init(struct ifpga_fme_hw *fme)
+{
+ struct feature *feature;
+ int i, ret;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret) {
+ fme_hw_uinit(fme);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void port_hw_uinit(struct ifpga_port_hw *port)
+{
+ struct feature *feature;
+ int i;
+
+ for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+ feature = &port->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+int port_hw_init(struct ifpga_port_hw *port)
+{
+ struct feature *feature;
+ int i, ret;
+
+ if (port->state == IFPGA_PORT_UNUSED)
+ return 0;
+
+ for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+ feature = &port->sub_feature[i];
+ if (feature->ops && feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret) {
+ port_hw_uinit(port);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
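
The port enable/disable pair above is reference counted: only the first __fpga_port_disable() asserts the soft reset, and only the matching last __fpga_port_enable() clears it, so the two calls must always be paired. A hedged sketch of the intended calling pattern, modelled on __fpga_port_reset() from the header below (the function name is made up for illustration):

/* Sketch only: quiesce the AFU under the port lock, do work, release. */
static int example_with_port_quiesced(struct ifpga_port_hw *port)
{
	int ret;

	spinlock_lock(&port->lock);
	ret = __fpga_port_disable(port);	/* asserts soft reset on first call */
	if (ret) {
		spinlock_unlock(&port->lock);
		return ret;
	}

	/* ... touch port state while the AFU cannot respond to MMIO ... */

	__fpga_port_enable(port);		/* clears reset on the last call */
	spinlock_unlock(&port->lock);
	return 0;
}
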
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
new file mode 100644
index 00000000..7a39a580
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_FEATURE_DEV_H_
+#define _IFPGA_FEATURE_DEV_H_
+
+#include "ifpga_hw.h"
+
+static inline struct ifpga_port_hw *
+get_port(struct ifpga_hw *hw, u32 port_id)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return NULL;
+
+ return &hw->port[port_id];
+}
+
+#define ifpga_for_each_fme_feature(hw, feature) \
+ for ((feature) = (hw)->sub_feature; \
+ (feature) < (hw)->sub_feature + (FME_FEATURE_ID_MAX); (feature)++)
+
+#define ifpga_for_each_port_feature(hw, feature) \
+ for ((feature) = (hw)->sub_feature; \
+ (feature) < (hw)->sub_feature + (PORT_FEATURE_ID_MAX); (feature)++)
+
+static inline struct feature *
+get_fme_feature_by_id(struct ifpga_fme_hw *fme, u64 id)
+{
+ struct feature *feature;
+
+ ifpga_for_each_fme_feature(fme, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline struct feature *
+get_port_feature_by_id(struct ifpga_port_hw *port, u64 id)
+{
+ struct feature *feature;
+
+ ifpga_for_each_port_feature(port, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline void *
+get_fme_feature_ioaddr_by_index(struct ifpga_fme_hw *fme, int index)
+{
+ return fme->sub_feature[index].addr;
+}
+
+static inline void *
+get_port_feature_ioaddr_by_index(struct ifpga_port_hw *port, int index)
+{
+ return port->sub_feature[index].addr;
+}
+
+static inline bool
+is_fme_feature_present(struct ifpga_fme_hw *fme, int index)
+{
+ return !!get_fme_feature_ioaddr_by_index(fme, index);
+}
+
+static inline bool
+is_port_feature_present(struct ifpga_port_hw *port, int index)
+{
+ return !!get_port_feature_ioaddr_by_index(port, index);
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid);
+
+int __fpga_port_disable(struct ifpga_port_hw *port);
+void __fpga_port_enable(struct ifpga_port_hw *port);
+
+static inline int fpga_port_disable(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_disable(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+static inline int fpga_port_enable(struct ifpga_port_hw *port)
+{
+ spinlock_lock(&port->lock);
+ __fpga_port_enable(port);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static inline int __fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ __fpga_port_enable(port);
+
+ return 0;
+}
+
+static inline int fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_reset(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status);
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set);
+
+int fme_hw_init(struct ifpga_fme_hw *fme);
+void fme_hw_uinit(struct ifpga_fme_hw *fme);
+void port_hw_uinit(struct ifpga_port_hw *port);
+int port_hw_init(struct ifpga_port_hw *port);
+int port_clear_error(struct ifpga_port_hw *port);
+void port_err_mask(struct ifpga_port_hw *port, bool mask);
+int port_err_clear(struct ifpga_port_hw *port, u64 err);
+
+extern struct feature_ops fme_hdr_ops;
+extern struct feature_ops fme_thermal_mgmt_ops;
+extern struct feature_ops fme_power_mgmt_ops;
+extern struct feature_ops fme_global_err_ops;
+extern struct feature_ops fme_pr_mgmt_ops;
+extern struct feature_ops fme_global_iperf_ops;
+extern struct feature_ops fme_global_dperf_ops;
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+
+/* This struct is used when parsing uafu irq_set */
+struct fpga_uafu_irq_set {
+ u32 start;
+ u32 count;
+ s32 *evtfds;
+};
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
+
+extern struct feature_ops port_hdr_ops;
+extern struct feature_ops port_error_ops;
+extern struct feature_ops port_stp_ops;
+extern struct feature_ops port_uint_ops;
+
+/* helper functions for feature ops */
+int fpga_msix_set_block(struct feature *feature, unsigned int start,
+ unsigned int count, s32 *fds);
+
+#endif /* _IFPGA_FEATURE_DEV_H_ */
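
As a usage note for the property plumbing declared here: a caller fills in a struct feature_prop with the target feature id and a property id, and fme_get_prop()/fme_set_prop() route the request to that feature's ops. A hedged sketch reading the FPGA temperature through the thermal feature (the helper name is hypothetical; the constants are the ones used by ifpga_fme.c):

/* Hypothetical helper: read the current FPGA temperature via the property
 * interface (fme_get_prop() dispatches to the thermal feature's ops). */
static int example_read_fpga_temperature(struct ifpga_fme_hw *fme, u64 *temp)
{
	struct feature_prop prop = {
		.feature_id = FME_FEATURE_ID_THERMAL_MGMT,
		.prop_id = FME_THERMAL_PROP_TEMPERATURE,
	};
	int ret;

	ret = fme_get_prop(fme, &prop);
	if (ret)
		return ret;

	*temp = prop.data;
	return 0;
}
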
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
new file mode 100644
index 00000000..4be60c0c
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
@@ -0,0 +1,734 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PWR_THRESHOLD_MAX 0x7F
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+/* FME header private feature */
+static int fme_hdr_init(struct feature *feature)
+{
+ struct feature_fme_header *fme_hdr;
+
+ fme_hdr = (struct feature_fme_header *)feature->addr;
+
+ dev_info(NULL, "FME HDR Init.\n");
+ dev_info(NULL, "FME cap %llx.\n",
+ (unsigned long long)fme_hdr->capability.csr);
+
+ return 0;
+}
+
+static void fme_hdr_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME HDR UInit.\n");
+}
+
+static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&fme_hdr->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *ports_num = fme_capability.num_ports;
+
+ return 0;
+}
+
+static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *cache_size = fme_capability.cache_size;
+
+ return 0;
+}
+
+static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *version = fme_capability.fabric_verid;
+
+ return 0;
+}
+
+static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *socket_id = fme_capability.socket_id;
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
+ u64 *bitstream_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_id = readq(&fme_hdr->bitstream_id);
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
+ u64 *bitstream_metadata)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_metadata = readq(&fme_hdr->bitstream_md);
+
+ return 0;
+}
+
+static int
+fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_HDR_PROP_REVISION:
+ return fme_hdr_get_revision(fme, &prop->data);
+ case FME_HDR_PROP_PORTS_NUM:
+ return fme_hdr_get_ports_num(fme, &prop->data);
+ case FME_HDR_PROP_CACHE_SIZE:
+ return fme_hdr_get_cache_size(fme, &prop->data);
+ case FME_HDR_PROP_VERSION:
+ return fme_hdr_get_version(fme, &prop->data);
+ case FME_HDR_PROP_SOCKET_ID:
+ return fme_hdr_get_socket_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_ID:
+ return fme_hdr_get_bitstream_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_METADATA:
+ return fme_hdr_get_bitstream_metadata(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_hdr_ops = {
+ .init = fme_hdr_init,
+ .uinit = fme_hdr_uinit,
+ .get_prop = fme_hdr_get_prop,
+};
+
+/* thermal management */
+static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1 = temp_threshold.tmp_thshold1;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres1 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres1 == 0) {
+ tmp_threshold.tmp_thshold1_enable = 0;
+ tmp_threshold.tmp_thshold1 = thres1;
+ } else {
+ tmp_threshold.tmp_thshold1_enable = 1;
+ tmp_threshold.tmp_thshold1 = thres1;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres2 = temp_threshold.tmp_thshold2;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres2 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres2 == 0) {
+ tmp_threshold.tmp_thshold2_enable = 0;
+ tmp_threshold.tmp_thshold2 = thres2;
+ } else {
+ tmp_threshold.tmp_thshold2_enable = 1;
+ tmp_threshold.tmp_thshold2 = thres2;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
+ u64 *thres_trip)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres_trip = temp_threshold.therm_trip_thshold;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
+ u64 *thres1_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_reached = temp_threshold.thshold1_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
+ u64 *thres1_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_reached = temp_threshold.thshold2_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 *thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_policy = temp_threshold.thshold_policy;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold tmp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+
+ if (thres1_policy == 0) {
+ tmp_threshold.thshold_policy = 0;
+ } else if (thres1_policy == 1) {
+ tmp_threshold.thshold_policy = 1;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
+ *temp = temp_rdsensor_fmt1.fpga_temp;
+
+ return 0;
+}
+
+static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_thermal *fme_thermal
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_thermal->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
+
+static int fme_thermal_mgmt_init(struct feature *feature)
+{
+ struct feature_fme_thermal *fme_thermal;
+ struct feature_fme_tmp_threshold_cap thermal_cap;
+
+
+ dev_info(NULL, "FME thermal mgmt Init.\n");
+
+ fme_thermal = (struct feature_fme_thermal *)feature->addr;
+ thermal_cap.csr = readq(&fme_thermal->threshold_cap);
+
+ dev_info(NULL, "FME thermal cap %llx.\n",
+		 (unsigned long long)thermal_cap.csr);
+
+ if (thermal_cap.tmp_thshold_disabled)
+ feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
+
+ return 0;
+}
+
+static void fme_thermal_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME thermal mgmt UInit.\n");
+}
+
+static int
+fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_set_threshold1(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_set_threshold2(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_set_threshold1_policy(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int
+fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
+ prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
+ prop->prop_id != FME_THERMAL_PROP_REVISION)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_get_threshold1(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_get_threshold2(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD_TRIP:
+ return fme_thermal_get_threshold_trip(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_REACHED:
+ return fme_thermal_get_threshold1_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2_REACHED:
+ return fme_thermal_get_threshold2_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_get_threshold1_policy(fme, &prop->data);
+ case FME_THERMAL_PROP_TEMPERATURE:
+ return fme_thermal_get_temperature(fme, &prop->data);
+ case FME_THERMAL_PROP_REVISION:
+ return fme_thermal_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+ .uinit = fme_thermal_mgmt_uinit,
+ .get_prop = fme_thermal_get_prop,
+ .set_prop = fme_thermal_set_prop,
+};
+
+static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *consumed = pm_status.pwr_consumed;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold1;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold1 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold2;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold2 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold1_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold2_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *rtl = pm_status.fpga_latency_report;
+
+ return 0;
+}
+
+static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_xeon_limit xeon_limit;
+
+ xeon_limit.csr = readq(&fme_power->xeon_limit);
+
+ if (!xeon_limit.enable)
+ xeon_limit.pwr_limit = 0;
+
+ *limit = xeon_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_fpga_limit fpga_limit;
+
+ fpga_limit.csr = readq(&fme_power->fpga_limit);
+
+ if (!fpga_limit.enable)
+ fpga_limit.pwr_limit = 0;
+
+ *limit = fpga_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_power->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_power_mgmt_init(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt Init.\n");
+
+ return 0;
+}
+
+static void fme_power_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt UInit.\n");
+}
+
+static int fme_power_mgmt_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_CONSUMED:
+ return fme_pwr_get_consumed(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_get_threshold1(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_get_threshold2(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1_STATUS:
+ return fme_pwr_get_threshold1_status(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2_STATUS:
+ return fme_pwr_get_threshold2_status(fme, &prop->data);
+ case FME_PWR_PROP_RTL:
+ return fme_pwr_get_rtl(fme, &prop->data);
+ case FME_PWR_PROP_XEON_LIMIT:
+ return fme_pwr_get_xeon_limit(fme, &prop->data);
+ case FME_PWR_PROP_FPGA_LIMIT:
+ return fme_pwr_get_fpga_limit(fme, &prop->data);
+ case FME_PWR_PROP_REVISION:
+ return fme_pwr_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_power_mgmt_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_set_threshold1(fme, prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_set_threshold2(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+ .uinit = fme_power_mgmt_uinit,
+ .get_prop = fme_power_mgmt_get_prop,
+ .set_prop = fme_power_mgmt_set_prop,
+};
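
Mirroring the read example shown after ifpga_feature_dev.h, writes go through fme_set_prop() and land in fme_power_mgmt_set_prop() above; values above PWR_THRESHOLD_MAX (0x7F) are rejected with -EINVAL by fme_pwr_set_threshold1(). A short hedged sketch (hypothetical helper name):

/* Hypothetical helper: program AP threshold 1 through the property path. */
static int example_set_power_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
{
	struct feature_prop prop = {
		.feature_id = FME_FEATURE_ID_POWER_MGMT,
		.prop_id = FME_PWR_PROP_THRESHOLD1,
		.data = threshold,
	};

	return fme_set_prop(fme, &prop);
}
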
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
new file mode 100644
index 00000000..1773b87e
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_clk_ctr clk;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ clk.afu_interf_clock = readq(&dperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_header header;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ header.csr = readq(&dperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define DPERF_TIMEOUT 30
+
+static bool fabric_pobj_is_enabled(int port_id,
+ struct feature_fme_dperf *dperf)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum dperf_fab_events fab_event)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dfpmon_fab_ctr ctr;
+ struct feature_fme_dperf *dperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, dperf))
+ goto exit;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &dperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &dperf->fab_ctr, DPERF_TIMEOUT, 1)) {
+		dev_err(fme, "timeout, unmatched fabric event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&dperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);
+
+static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_dperf *dperf;
+ int status;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ status = fabric_pobj_is_enabled(port_id, dperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * Enabling the fabric event counter for one port (or for all ports) will
+ * automatically disable any fabric event counter that was enabled before.
+ */
+static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dperf *dperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+	/* nothing to do if it is already enabled */
+ if (fabric_pobj_is_enabled(port_id, dperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&dperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+
+static int fme_global_dperf_init(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf Init.\n");
+
+ return 0;
+}
+
+static void fme_global_dperf_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf UInit.\n");
+}
+
+static int fme_dperf_fab_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_dperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_dperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* MMIO_READ */
+ return fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);
+ case 0x5: /* MMIO_WRITE */
+ return fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);
+ case 0x6: /* ENABLE */
+ return fme_dperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_dperf_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_dperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_dperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_dperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_dperf_fab_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE - fab root only prop */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_dperf_set_fab_freeze(fme, prop->data);
+ case 0x6: /* ENABLE - fab both root and sub */
+ return fme_dperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_dperf_ops = {
+ .init = fme_global_dperf_init,
+ .uinit = fme_global_dperf_uinit,
+ .get_prop = fme_global_dperf_get_prop,
+ .set_prop = fme_global_dperf_set_prop,
+
+};
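
The dperf get/set handlers above dispatch on three fields packed into prop_id: a top-level group (PERF_PROP_TOP_*), a sub object (the port id, or PERF_PROP_SUB_UNUSED for root properties) and a leaf id (0x1 freeze, 0x2 PCIe0 read, ..., 0x6 enable). The exact masks behind GET_FIELD() live in ifpga_defines.h; the sketch below assumes an 8/8/16 bit split purely for illustration, and every name in it is made up.

/* Illustrative prop_id composition only; the real PROP_TOP/PROP_SUB/PROP_ID
 * masks are defined in ifpga_defines.h and may differ from this split. */
#include <stdint.h>

#define EX_PROP_TOP(v)	(((uint64_t)(v) & 0xff) << 24)
#define EX_PROP_SUB(v)	(((uint64_t)(v) & 0xff) << 16)
#define EX_PROP_ID(v)	((uint64_t)(v) & 0xffff)

/* "fabric / port N / MMIO_READ counter" (leaf id 0x4 above) would then be: */
static inline uint64_t ex_dperf_mmio_read_prop(uint8_t port_id)
{
	return EX_PROP_TOP(1 /* stand-in for PERF_PROP_TOP_FAB */) |
	       EX_PROP_SUB(port_id) |
	       EX_PROP_ID(0x4);
}
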
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
new file mode 100644
index 00000000..8c26fb25
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ *val = fme_error0.csr;
+
+ return 0;
+}
+
+static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_first_error fme_first_err;
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ *val = fme_first_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_next_error fme_next_err;
+
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+ *val = fme_next_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_ERROR0_MASK, &fme_err->fme_err_mask);
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ if (val != fme_error0.csr) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+
+ writeq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);
+ writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
+ &fme_err->fme_first_err);
+ writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
+ &fme_err->fme_next_err);
+
+exit:
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_header header;
+
+ header.csr = readq(&fme_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ *val = pcie0_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ if (val != pcie0_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
+ &fme_err->pcie0_err);
+
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ *val = pcie1_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ if (val != pcie1_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
+ &fme_err->pcie1_err);
+
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+
+ ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
+ *val = ras_nonfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+
+ ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
+ *val = ras_catfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+ *val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
+
+ return 0;
+}
+
+static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ spinlock_lock(&fme->lock);
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+
+ if (val <= FME_RAS_ERROR_INJ_MASK) {
+ ras_error_inj.csr = val;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static void fme_error_enable(struct ifpga_fme_hw *fme)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ writeq(0UL, &fme_err->ras_nonfat_mask);
+ writeq(0UL, &fme_err->ras_catfat_mask);
+}
+
+static int fme_global_error_init(struct feature *feature)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ fme_error_enable(fme);
+
+ if (feature->ctx_num)
+ fme->capability |= FPGA_FME_CAP_ERR_IRQ;
+
+ return 0;
+}
+
+static void fme_global_error_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int fme_err_fme_err_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* ERRORS */
+ return fme_err_get_errors(fme, &prop->data);
+ case 0x2: /* FIRST_ERROR */
+ return fme_err_get_first_error(fme, &prop->data);
+ case 0x3: /* NEXT_ERROR */
+ return fme_err_get_next_error(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x5: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_get_pcie0_errors(fme, &prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_get_pcie1_errors(fme, &prop->data);
+ case 0x8: /* NONFATAL_ERRORS */
+ return fme_err_get_nonfatal_errors(fme, &prop->data);
+ case 0x9: /* CATFATAL_ERRORS */
+ return fme_err_get_catfatal_errors(fme, &prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_get_inject_errors(fme, &prop->data);
+ case 0xb: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_get_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_fme_err_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x4: /* CLEAR */
+ return fme_err_set_clear(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_set_pcie0_errors(fme, prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_set_pcie1_errors(fme, prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_set_inject_errors(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_set_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_err_ops = {
+ .init = fme_global_error_init,
+ .uinit = fme_global_error_uinit,
+ .get_prop = fme_global_error_get_prop,
+ .set_prop = fme_global_error_set_prop,
+};
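
The error feature above is driven entirely through the generic feature_prop interface: prop_id carries TOP/SUB/ID sub-fields that fme_global_error_get_prop()/set_prop() decode with GET_FIELD(). The caller-side sketch below is only an illustration: the EXAMPLE_PROP_ID() helper and its shift values are hypothetical (the real PROP_TOP/PROP_SUB/PROP_ID layout lives in ifpga_defines.h and is not reproduced here), while ERR_PROP_TOP_UNUSED, PROP_SUB_UNUSED and the 0x6 PCIE0_ERRORS id are taken from the code above.

/* Hypothetical helper: compose a prop_id from TOP/SUB/ID sub-fields.
 * The shifts below are placeholders for the real PROP_TOP/PROP_SUB/PROP_ID
 * field offsets defined in ifpga_defines.h.
 */
#define EXAMPLE_PROP_ID(top, sub, id) \
	(((u64)(top) << 40) | ((u64)(sub) << 32) | (u64)(id))

static int example_read_fme_pcie0_errors(struct feature *err_feature, u64 *out)
{
	struct feature_prop prop;

	/* 0x6 is the PCIE0_ERRORS id handled by fme_err_root_get_prop() */
	prop.prop_id = EXAMPLE_PROP_ID(ERR_PROP_TOP_UNUSED, PROP_SUB_UNUSED, 0x6);

	if (fme_global_err_ops.get_prop(err_feature, &prop))
		return -1;

	*out = prop.data;
	return 0;
}
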
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
new file mode 100644
index 00000000..e6c40a19
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_clk_ctr clk;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ clk.afu_interf_clock = readq(&iperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_header header;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ header.csr = readq(&iperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ *freeze = (u64)ctl.freeze;
+ return 0;
+}
+
+static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->ch_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define IPERF_TIMEOUT 30
+
+static u64 read_cache_counter(struct ifpga_fme_hw *fme,
+ u8 channel, enum iperf_cache_events event)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* set channel access type and cache event code. */
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.cci_chsel = channel;
+ ctl.cache_event = event;
+ writeq(ctl.csr, &iperf->ch_ctl);
+
+ /* check the event type in the counter registers */
+ ctr0.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr0,
+ &iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr0.csr = readq(&iperf->ch_ctr0);
+ ctr1.csr = readq(&iperf->ch_ctr1);
+ counter = ctr0.cache_counter + ctr1.cache_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define CACHE_SHOW(name, type, event) \
+static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_cache_counter(fme, type, event); \
+ return 0; \
+}
+
+CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
+CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
+CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
+CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
+CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
+CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
+CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
+CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
+CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN);
+CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN);
+
+static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
+ enum iperf_vtd_sip_events event)
+{
+ struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
+ sip_ctl.vtd_evtcode = event;
+ writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);
+
+ sip_ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, sip_ctr,
+ &iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
+ counter = sip_ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_SIP_SHOW(name, event) \
+static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_iommu_sip_counter(fme, event); \
+ return 0; \
+}
+
+VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
+VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
+VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
+VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
+VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
+VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
+VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
+VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
+VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
+VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
+VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
+VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);
+
+static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_vtd_events base_event)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_ifpmon_vtd_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ enum iperf_vtd_events event = base_event + port_id;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.vtd_evtcode = event;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+
+ ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->vtd_ctr);
+ counter = ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_PORT_SHOW(name, base_event) \
+static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_iommu_counter(fme, port_id, base_event); \
+ return 0; \
+}
+
+VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
+VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
+VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
+VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
+VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
+VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
+VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);
+
+static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_fab_events fab_event)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_ifpmon_fab_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, iperf))
+ goto exit;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &iperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
+FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
+FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
+FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
+FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);
+
+static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_iperf *iperf;
+ int status;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ status = fabric_pobj_is_enabled(port_id, iperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * Enabling the event counter of one port (or of all ports) in the fabric
+ * automatically disables any other fabric event counter enabled before.
+ */
+static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* nothing to do if it is already enabled */
+ if (fabric_pobj_is_enabled(port_id, iperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&iperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+#define FME_IPERF_CAP_IOMMU 0x1
+
+static int fme_global_iperf_init(struct feature *feature)
+{
+ struct ifpga_fme_hw *fme;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+
+ dev_info(NULL, "FME global_iperf Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ /* check whether iommu is supported on this device. */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ dev_info(NULL, "FME HEAD fme_capability %llx.\n",
+ (unsigned long long)fme_capability.csr);
+
+ if (fme_capability.iommu_support)
+ feature->cap |= FME_IPERF_CAP_IOMMU;
+
+ return 0;
+}
+
+static void fme_global_iperf_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_iperf UInit.\n");
+}
+
+static int fme_iperf_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_iperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_iperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_cache_freeze(fme, &prop->data);
+ case 0x2: /* READ_HIT */
+ return fme_iperf_get_cache_read_hit(fme, &prop->data);
+ case 0x3: /* READ_MISS */
+ return fme_iperf_get_cache_read_miss(fme, &prop->data);
+ case 0x4: /* WRITE_HIT */
+ return fme_iperf_get_cache_write_hit(fme, &prop->data);
+ case 0x5: /* WRITE_MISS */
+ return fme_iperf_get_cache_write_miss(fme, &prop->data);
+ case 0x6: /* HOLD_REQUEST */
+ return fme_iperf_get_cache_hold_request(fme, &prop->data);
+ case 0x7: /* TX_REQ_STALL */
+ return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
+ case 0x8: /* RX_REQ_STALL */
+ return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
+ case 0x9: /* RX_EVICTION */
+ return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
+ case 0xa: /* DATA_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_data_write_port_contention(fme,
+ &prop->data);
+ case 0xb: /* TAG_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_tag_write_port_contention(fme,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_vtd_freeze(fme, &prop->data);
+ case 0x2: /* IOTLB_4K_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
+ case 0x3: /* IOTLB_2M_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
+ case 0x4: /* IOTLB_1G_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
+ case 0x5: /* SLPWC_L3_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
+ case 0x6: /* SLPWC_L4_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
+ case 0x7: /* RCC_HIT */
+ return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
+ case 0x8: /* IOTLB_4K_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
+ case 0x9: /* IOTLB_2M_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
+ case 0xa: /* IOTLB_1G_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
+ case 0xb: /* SLPWC_L3_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
+ case 0xc: /* SLPWC_L4_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
+ case 0xd: /* RCC_MISS */
+ return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_sub_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub > PERF_MAX_PORT_NUM)
+ return -ENOENT;
+
+ switch (id) {
+ case 0xe: /* READ_TRANSACTION */
+ return fme_iperf_get_vtd_port_read_transaction(fme, sub,
+ &prop->data);
+ case 0xf: /* WRITE_TRANSACTION */
+ return fme_iperf_get_vtd_port_write_transaction(fme, sub,
+ &prop->data);
+ case 0x10: /* DEVTLB_READ_HIT */
+ return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
+ &prop->data);
+ case 0x11: /* DEVTLB_WRITE_HIT */
+ return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
+ &prop->data);
+ case 0x12: /* DEVTLB_4K_FILL */
+ return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
+ &prop->data);
+ case 0x13: /* DEVTLB_2M_FILL */
+ return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
+ &prop->data);
+ case 0x14: /* DEVTLB_1G_FILL */
+ return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED)
+ return fme_iperf_vtd_root_get_prop(feature, prop);
+
+ return fme_iperf_vtd_sub_get_prop(feature, prop);
+}
+
+static int fme_iperf_fab_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ /* Other properties are present for both top and sub levels */
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_iperf_get_fab_port_pcie0_read(fme, sub,
+ &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_iperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* PCIE1_READ */
+ return fme_iperf_get_fab_port_pcie1_read(fme, sub,
+ &prop->data);
+ case 0x5: /* PCIE1_WRITE */
+ return fme_iperf_get_fab_port_pcie1_write(fme, sub,
+ &prop->data);
+ case 0x6: /* UPI_READ */
+ return fme_iperf_get_fab_port_upi_read(fme, sub,
+ &prop->data);
+ case 0x7: /* UPI_WRITE */
+ return fme_iperf_get_fab_port_upi_write(fme, sub,
+ &prop->data);
+ case 0x8: /* MMIO_READ */
+ return fme_iperf_get_fab_port_mmio_read(fme, sub,
+ &prop->data);
+ case 0x9: /* MMIO_WRITE */
+ return fme_iperf_get_fab_port_mmio_write(fme, sub,
+ &prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_get_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_get_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_iperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_cache_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_vtd_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_fab_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_set_fab_freeze(fme, prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_set_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_set_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_iperf_ops = {
+ .init = fme_global_iperf_init,
+ .uinit = fme_global_iperf_uinit,
+ .get_prop = fme_global_iperf_get_prop,
+ .set_prop = fme_global_iperf_set_prop,
+};
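
All of the iperf counters above follow the same select-then-poll handshake: write the event code into the control register, wait for the hardware to echo the same code in the counter register, then read the 64-bit count. The sketch below spells out only that polling step for the fabric counter, assuming the readq()/udelay() helpers used elsewhere in this base code; fpga_wait_register_field() in the driver expresses the same loop as a macro over an arbitrary register field, so this is an illustration, not the driver's implementation.

/* Minimal sketch of the poll step, assuming the fabric counter layout
 * (struct feature_fme_ifpmon_fab_ctr) from ifpga_defines.h.
 */
static int example_wait_fab_event_code(struct feature_fme_iperf *iperf,
					u8 expect, int timeout_us)
{
	struct feature_fme_ifpmon_fab_ctr ctr;

	while (timeout_us-- > 0) {
		ctr.csr = readq(&iperf->fab_ctr);
		if (ctr.event_code == expect)
			return 0;	/* event selection has latched */
		udelay(1);
	}

	return -ETIMEDOUT;
}
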
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
new file mode 100644
index 00000000..ec0beeb1
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static u64
+pr_err_handle(struct feature_fme_pr *fme_pr)
+{
+ struct feature_fme_pr_status fme_pr_status;
+ unsigned long err_code;
+ u64 fme_pr_error;
+ int i;
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ if (!fme_pr_status.pr_status)
+ return 0;
+
+ err_code = readq(&fme_pr->ccip_fme_pr_err);
+ fme_pr_error = err_code;
+
+ for (i = 0; i < PR_MAX_ERR_NUM; i++) {
+ if (err_code & (1 << i))
+ dev_info(NULL, "%s\n", pr_err_msg[i]);
+ }
+
+ writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
+ return fme_pr_error;
+}
+
+static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
+ return -EINVAL;
+
+ dev_info(fme_dev, "resetting PR before initiated PR\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ fme_pr_ctl.pr_reset_ack = 1;
+
+ if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 0;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");
+
+ fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;
+
+ if (fpga_wait_register_field(pr_host_status, fme_pr_status,
+ &fme_pr->ccip_fme_pr_status,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "check if have any previous PR error\n");
+ pr_err_handle(fme_pr);
+ return 0;
+}
+
+static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
+ int port_id, const char *buf, size_t count,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+ struct feature_fme_pr_data fme_pr_data;
+ int delay, pr_credit;
+ int ret = 0;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ dev_info(fme_dev, "set PR port ID and start request\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_regionid = port_id;
+ fme_pr_ctl.pr_start_req = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "pushing data from bitstream to HW\n");
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+
+ while (count > 0) {
+ delay = 0;
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(fme_dev, "maximum try\n");
+
+ info->pr_err = pr_err_handle(fme_pr);
+ return info->pr_err ? -EIO : -ETIMEDOUT;
+ }
+ udelay(1);
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+ }
+
+ if (count >= fme_dev->pr_bandwidth) {
+ switch (fme_dev->pr_bandwidth) {
+ case 4:
+ fme_pr_data.rsvd = 0;
+ fme_pr_data.pr_data_raw = *((const u32 *)buf);
+ writeq(fme_pr_data.csr,
+ &fme_pr->ccip_fme_pr_data);
+ break;
+ default:
+ ret = -EFAULT;
+ goto done;
+ }
+
+ buf += fme_dev->pr_bandwidth;
+ count -= fme_dev->pr_bandwidth;
+ pr_credit--;
+ } else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_push_complete = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "green bitstream push complete\n");
+ dev_info(fme_dev, "waiting for HW to release PR resource\n");
+
+ fme_pr_ctl.pr_start_req = 0;
+
+ if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "timed out waiting for HW to release PR resource\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "PR operation complete, checking status\n");
+ info->pr_err = pr_err_handle(fme_pr);
+ if (info->pr_err)
+ return -EIO;
+
+ dev_info(fme_dev, "PR done successfully\n");
+ return 0;
+}
+
+static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ info->state = FPGA_PR_STATE_WRITE_INIT;
+ ret = fme_pr_write_init(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error preparing FPGA for writing\n");
+ info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
+ return ret;
+ }
+
+ /*
+ * Write the FPGA image to the FPGA.
+ */
+ info->state = FPGA_PR_STATE_WRITE;
+ ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
+ if (ret) {
+ dev_err(fme_dev, "Error while writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_ERR;
+ return ret;
+ }
+
+ /*
+ * After all the FPGA image has been written, do the device specific
+ * steps to finish and set the FPGA into operating mode.
+ */
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE;
+ ret = fme_pr_write_complete(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error after writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
+ return ret;
+ }
+ info->state = FPGA_PR_STATE_DONE;
+
+ return 0;
+}
+
+static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct fpga_pr_info info;
+ struct ifpga_port_hw *port;
+ int ret = 0;
+
+ if (!buffer || size == 0)
+ return -EINVAL;
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ /*
+ * Pad the PR buffer with extra zeros to align it with the PR bandwidth;
+ * the HW ignores these zeros automatically.
+ */
+ size = IFPGA_ALIGN(size, fme->pr_bandwidth);
+
+ /* get fme header region */
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_HEADER);
+ if (!fme_hdr)
+ return -EINVAL;
+
+ /* check port id */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ if (port_id >= fme_capability.num_ports) {
+ dev_err(fme, "port number more than maximum\n");
+ return -EINVAL;
+ }
+
+ memset(&info, 0, sizeof(struct fpga_pr_info));
+ info.flags = FPGA_MGR_PARTIAL_RECONFIG;
+ info.port_id = port_id;
+
+ spinlock_lock(&fme->lock);
+
+ /* get port device by port_id */
+ port = &hw->port[port_id];
+
+ /* Disable Port before PR */
+ fpga_port_disable(port);
+
+ ret = fpga_pr_buf_load(fme, &info, (void *)buffer, size);
+
+ *status = info.pr_err;
+
+ /* Re-enable Port after PR finished */
+ fpga_port_enable(port);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, u64 *status)
+{
+ struct bts_header *bts_hdr;
+ void *buf;
+ struct ifpga_port_hw *port;
+ int ret;
+
+ if (!buffer || size == 0) {
+ dev_err(hw, "invalid parameter\n");
+ return -EINVAL;
+ }
+
+ bts_hdr = (struct bts_header *)buffer;
+
+ if (is_valid_bts(bts_hdr)) {
+ dev_info(hw, "this is a valid bitsteam..\n");
+ size -= (sizeof(struct bts_header) +
+ bts_hdr->metadata_len);
+ buf = (u8 *)buffer + sizeof(struct bts_header) +
+ bts_hdr->metadata_len;
+ } else {
+ return -EINVAL;
+ }
+
+ /* clear port errors before doing PR */
+ port = &hw->port[port_id];
+ ret = port_clear_error(port);
+ if (ret) {
+ dev_err(hw, "port cannot clear error\n");
+ return -EINVAL;
+ }
+
+ return fme_pr(hw, port_id, buf, size, status);
+}
+
+static int fme_pr_mgmt_init(struct feature *feature)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_header fme_pr_header;
+ struct ifpga_fme_hw *fme;
+
+ dev_info(NULL, "FME PR MGMT Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+
+ fme_pr = (struct feature_fme_pr *)feature->addr;
+
+ fme_pr_header.csr = readq(&fme_pr->header);
+ if (fme_pr_header.revision == 2) {
+ dev_info(NULL, "using 512-bit PR\n");
+ fme->pr_bandwidth = 64;
+ } else {
+ dev_info(NULL, "using 32-bit PR\n");
+ fme->pr_bandwidth = 4;
+ }
+
+ return 0;
+}
+
+static void fme_pr_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME PR MGMT UInit.\n");
+}
+
+struct feature_ops fme_pr_mgmt_ops = {
+ .init = fme_pr_mgmt_init,
+ .uinit = fme_pr_mgmt_uinit,
+};
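
do_pr() is the entry point for partial reconfiguration: it validates and strips the bts_header, clears port errors, and fme_pr() then disables the port, streams the image in pr_bandwidth-sized words under PR credit flow control, and re-enables the port. The following caller sketch is not part of the patch; it only shows how an upper layer might invoke do_pr(), with error handling and the origin of the bitstream buffer left to the caller.

/* Illustrative caller of do_pr(); "hw" is the ifpga_hw instance owned by
 * the rawdev driver, "bitstream" points at a complete GBS image including
 * its bts_header.
 */
static int example_partial_reconfig(struct ifpga_hw *hw, u32 port_id,
				    void *bitstream, u32 size)
{
	u64 pr_status = 0;
	int ret;

	ret = do_pr(hw, port_id, bitstream, size, &pr_status);
	if (ret)
		dev_err(hw, "PR failed, error status 0x%llx\n",
			(unsigned long long)pr_status);

	return ret;
}
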
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_hw.h b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
new file mode 100644
index 00000000..a20520c9
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_HW_H_
+#define _IFPGA_HW_H_
+
+#include "ifpga_defines.h"
+#include "opae_ifpga_hw_api.h"
+
+enum ifpga_feature_state {
+ IFPGA_FEATURE_UNUSED = 0,
+ IFPGA_FEATURE_ATTACHED,
+};
+
+struct feature_irq_ctx {
+ int eventfd;
+ int idx;
+};
+
+struct feature {
+ enum ifpga_feature_state state;
+ const char *name;
+ u64 id;
+ u8 *addr;
+ uint64_t phys_addr;
+ u32 size;
+ int revision;
+ u64 cap;
+ int vfio_dev_fd;
+ struct feature_irq_ctx *ctx;
+ unsigned int ctx_num;
+
+ void *parent; /* pointer to parent hw data structure */
+
+ struct feature_ops *ops; /* callbacks for this feature */
+};
+
+struct feature_ops {
+ int (*init)(struct feature *feature);
+ void (*uinit)(struct feature *feature);
+ int (*get_prop)(struct feature *feature, struct feature_prop *prop);
+ int (*set_prop)(struct feature *feature, struct feature_prop *prop);
+ int (*set_irq)(struct feature *feature, void *irq_set);
+};
+
+enum ifpga_fme_state {
+ IFPGA_FME_UNUSED = 0,
+ IFPGA_FME_IMPLEMENTED,
+};
+
+struct ifpga_fme_hw {
+ enum ifpga_fme_state state;
+
+ struct feature sub_feature[FME_FEATURE_ID_MAX];
+ spinlock_t lock; /* protect hardware access */
+
+ void *parent; /* pointer to ifpga_hw */
+
+ /* provided by HEADER feature */
+ u32 port_num;
+ struct uuid bitstream_id;
+ u64 bitstream_md;
+ size_t pr_bandwidth;
+ u32 socket_id;
+ u32 fabric_version_id;
+ u32 cache_size;
+
+ u32 capability;
+};
+
+enum ifpga_port_state {
+ IFPGA_PORT_UNUSED = 0,
+ IFPGA_PORT_ATTACHED,
+ IFPGA_PORT_DETACHED,
+};
+
+struct ifpga_port_hw {
+ enum ifpga_port_state state;
+
+ struct feature sub_feature[PORT_FEATURE_ID_MAX];
+ spinlock_t lock; /* protect access to hw */
+
+ void *parent; /* pointer to ifpga_hw */
+
+ int port_id; /* provided by HEADER feature */
+ struct uuid afu_id; /* provided by User AFU feature */
+
+ unsigned int disable_count;
+
+ u32 capability;
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+ u8 *stp_addr;
+ u32 stp_size;
+};
+
+#define AFU_MAX_REGION 1
+
+struct ifpga_afu_info {
+ struct opae_reg_region region[AFU_MAX_REGION];
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct ifpga_hw {
+ struct opae_adapter *adapter;
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_fme_hw fme;
+ struct ifpga_port_hw port[MAX_FPGA_PORT_NUM];
+};
+
+static inline bool is_ifpga_hw_pf(struct ifpga_hw *hw)
+{
+ return hw->fme.state != IFPGA_FME_UNUSED;
+}
+
+static inline bool is_valid_port_id(struct ifpga_hw *hw, u32 port_id)
+{
+ if (port_id >= MAX_FPGA_PORT_NUM ||
+ hw->port[port_id].state != IFPGA_PORT_ATTACHED)
+ return false;
+
+ return true;
+}
+#endif /* _IFPGA_HW_H_ */
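
ifpga_hw.h ties the pieces together: an ifpga_hw owns one FME and up to MAX_FPGA_PORT_NUM ports, and both the FME and each port are arrays of struct feature driven through their feature_ops vtables. The sketch below only illustrates that data model by walking a port's attached sub-features; the driver's real enumeration and init path lives in ifpga_feature_dev.c / ifpga_enumerate.c.

/* Illustrative walk over a port's attached sub-features, invoking each
 * feature's init() callback. Not the driver's actual init path.
 */
static int example_init_port_features(struct ifpga_port_hw *port)
{
	struct feature *feature;
	int i, ret;

	for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
		feature = &port->sub_feature[i];

		if (feature->state != IFPGA_FEATURE_ATTACHED)
			continue;

		if (feature->ops && feature->ops->init) {
			ret = feature->ops->init(feature);
			if (ret)
				return ret;
		}
	}

	return 0;
}
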
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port.c b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
new file mode 100644
index 00000000..a962f5b4
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+static int port_get_revision(struct ifpga_port_hw *port, u64 *revision)
+{
+ struct feature_port_header *port_hdr
+ = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&port_hdr->header);
+
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int port_get_portidx(struct ifpga_port_hw *port, u64 *idx)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ capability.csr = readq(&port_hdr->capability);
+ *idx = capability.port_number;
+
+ return 0;
+}
+
+static int port_get_latency_tolerance(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ control.csr = readq(&port_hdr->control);
+ *val = control.latency_tolerance;
+
+ return 0;
+}
+
+static int port_get_ap1_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap1_event;
+
+ return 0;
+}
+
+static int port_set_ap1_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap1_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_ap2_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap2_event;
+
+ return 0;
+}
+
+static int port_set_ap2_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap2_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_power_state(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.power_state;
+
+ return 0;
+}
+
+static int port_get_userclk_freqcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_hdr_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port hdr Init.\n");
+
+ fpga_port_reset(port);
+
+ return 0;
+}
+
+static void port_hdr_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port hdr uinit.\n");
+}
+
+static int port_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_REVISION:
+ return port_get_revision(port, &prop->data);
+ case PORT_HDR_PROP_PORTIDX:
+ return port_get_portidx(port, &prop->data);
+ case PORT_HDR_PROP_LATENCY_TOLERANCE:
+ return port_get_latency_tolerance(port, &prop->data);
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_get_ap1_event(port, &prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_get_ap2_event(port, &prop->data);
+ case PORT_HDR_PROP_POWER_STATE:
+ return port_get_power_state(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_get_userclk_freqcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_get_userclk_freqcntrcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQSTS:
+ return port_get_userclk_freqsts(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_CNTRSTS:
+ return port_get_userclk_freqcntrsts(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_hdr_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_set_ap1_event(port, prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_set_ap2_event(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_set_userclk_freqcmd(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_set_userclk_freqcntrcmd(port, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops port_hdr_ops = {
+ .init = port_hdr_init,
+ .uinit = port_hdr_uinit,
+ .get_prop = port_hdr_get_prop,
+ .set_prop = port_hdr_set_prop,
+};
+
+static int port_stp_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port stp Init.\n");
+
+ spinlock_lock(&port->lock);
+ port->stp_addr = feature->addr;
+ port->stp_size = feature->size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_stp_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port stp uinit.\n");
+}
+
+struct feature_ops port_stp_ops = {
+ .init = port_stp_init,
+ .uinit = port_stp_uinit,
+};
+
+static int port_uint_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "PORT UINT Init.\n");
+
+ spinlock_lock(&port->lock);
+ if (feature->ctx_num) {
+ port->capability |= FPGA_PORT_CAP_UAFU_IRQ;
+ port->num_uafu_irqs = feature->ctx_num;
+ }
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_uint_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "PORT UINT UInit.\n");
+}
+
+struct feature_ops port_uint_ops = {
+ .init = port_uint_init,
+ .uinit = port_uint_uinit,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
new file mode 100644
index 00000000..23db562b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int port_err_get_revision(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_header header;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ header.csr = readq(&port_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int port_err_get_errors(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+ *val = error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_error(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_first_err_key first_error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ first_error.csr = readq(&port_err->port_first_error);
+ *val = first_error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_lsb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req0 malreq0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq0.header_lsb = readq(&port_err->malreq0);
+ *val = malreq0.header_lsb;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_msb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req1 malreq1;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq1.header_msb = readq(&port_err->malreq1);
+ *val = malreq1.header_msb;
+
+ return 0;
+}
+
+static int port_err_set_clear(struct ifpga_port_hw *port, u64 val)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = port_err_clear(port, val);
+ spinlock_unlock(&port->lock);
+
+ return ret;
+}
+
+static int port_error_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port error Init.\n");
+
+ spinlock_lock(&port->lock);
+ port_err_mask(port, false);
+ if (feature->ctx_num)
+ port->capability |= FPGA_PORT_CAP_ERR_IRQ;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_error_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int port_error_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_ERR_PROP_REVISION:
+ return port_err_get_revision(port, &prop->data);
+ case PORT_ERR_PROP_ERRORS:
+ return port_err_get_errors(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_ERROR:
+ return port_err_get_first_error(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB:
+ return port_err_get_first_malformed_req_lsb(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB:
+ return port_err_get_first_malformed_req_msb(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_error_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ if (prop->prop_id == PORT_ERR_PROP_CLEAR)
+ return port_err_set_clear(port, prop->data);
+
+ return -ENOENT;
+}
+
+struct feature_ops port_error_ops = {
+ .init = port_error_init,
+ .uinit = port_error_uinit,
+ .get_prop = port_error_get_prop,
+ .set_prop = port_error_set_prop,
+};
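
Unlike the FME error feature, the port error feature exposes named property IDs (PORT_ERR_PROP_*), so the caller side needs no bit-field composition. The hedged sketch below reads the latched errors through port_get_prop() and then requests a clear through port_set_prop() with the value just read; port_err_clear(), defined elsewhere in this base code, performs the actual write-to-clear, so this is only a usage illustration.

/* Illustrative read-then-clear of the port error register via the
 * property interface defined in ifpga_port.c above.
 */
static int example_clear_port_errors(struct ifpga_port_hw *port)
{
	struct feature_prop prop;
	int ret;

	prop.feature_id = PORT_FEATURE_ID_ERROR;
	prop.prop_id = PORT_ERR_PROP_ERRORS;
	ret = port_get_prop(port, &prop);	/* latch current error bits */
	if (ret)
		return ret;

	prop.prop_id = PORT_ERR_PROP_CLEAR;	/* prop.data still holds them */
	return port_set_prop(port, &prop);
}
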
diff --git a/drivers/raw/ifpga_rawdev/base/meson.build b/drivers/raw/ifpga_rawdev/base/meson.build
new file mode 100644
index 00000000..cb655352
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/meson.build
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = [
+ 'ifpga_api.c',
+ 'ifpga_enumerate.c',
+ 'ifpga_feature_dev.c',
+ 'ifpga_fme.c',
+ 'ifpga_fme_iperf.c',
+ 'ifpga_fme_dperf.c',
+ 'ifpga_fme_error.c',
+ 'ifpga_port.c',
+ 'ifpga_port_error.c',
+ 'ifpga_fme_pr.c',
+ 'opae_hw_api.c',
+ 'opae_ifpga_hw_api.c',
+ 'opae_debug.c'
+]
+
+error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
+ '-Wno-format', '-Wno-unused-but-set-variable',
+ '-Wno-strict-aliasing'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('ifpga_rawdev_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.c b/drivers/raw/ifpga_rawdev/base/opae_debug.c
new file mode 100644
index 00000000..024d7d29
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#define OPAE_HW_DEBUG
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+
+void opae_manager_dump(struct opae_manager *mgr)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Manger %s\n", mgr->name);
+ opae_log("OPAE Manger OPs = %p\n", mgr->ops);
+ opae_log("OPAE Manager Private Data = %p\n", mgr->data);
+ opae_log("OPAE Adapter(parent) = %p\n", mgr->adapter);
+ opae_log("==========================\n");
+}
+
+void opae_bridge_dump(struct opae_bridge *br)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Bridge %s\n", br->name);
+ opae_log("OPAE Bridge ID = %d\n", br->id);
+ opae_log("OPAE Bridge OPs = %p\n", br->ops);
+ opae_log("OPAE Bridge Private Data = %p\n", br->data);
+ opae_log("OPAE Accelerator(under this bridge) = %p\n", br->acc);
+ opae_log("==========================\n");
+}
+
+void opae_accelerator_dump(struct opae_accelerator *acc)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Accelerator %s\n", acc->name);
+ opae_log("OPAE Accelerator Index = %d\n", acc->index);
+ opae_log("OPAE Accelerator OPs = %p\n", acc->ops);
+ opae_log("OPAE Accelerator Private Data = %p\n", acc->data);
+ opae_log("OPAE Bridge (upstream) = %p\n", acc->br);
+ opae_log("OPAE Manager (upstream) = %p\n", acc->mgr);
+ opae_log("==========================\n");
+
+ if (acc->br)
+ opae_bridge_dump(acc->br);
+}
+
+static void opae_adapter_data_dump(void *data)
+{
+ struct opae_adapter_data *d = data;
+ struct opae_adapter_data_pci *d_pci;
+ struct opae_reg_region *r;
+ int i;
+
+ opae_log("=====%s=====\n", __func__);
+
+ switch (d->type) {
+ case OPAE_FPGA_PCI:
+ d_pci = (struct opae_adapter_data_pci *)d;
+
+ opae_log("OPAE Adapter Type = PCI\n");
+ opae_log("PCI Device ID: 0x%04x\n", d_pci->device_id);
+ opae_log("PCI Vendor ID: 0x%04x\n", d_pci->vendor_id);
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ r = &d_pci->region[i];
+ opae_log("PCI Bar %d: phy(%llx) len(%llx) addr(%p)\n",
+ i, (unsigned long long)r->phys_addr,
+ (unsigned long long)r->len, r->addr);
+ }
+ break;
+ case OPAE_FPGA_NET:
+ break;
+ }
+
+ opae_log("==========================\n");
+}
+
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose)
+{
+ struct opae_accelerator *acc;
+
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Adapter %s\n", adapter->name);
+ opae_log("OPAE Adapter OPs = %p\n", adapter->ops);
+ opae_log("OPAE Adapter Private Data = %p\n", adapter->data);
+ opae_log("OPAE Manager (downstream) = %p\n", adapter->mgr);
+
+ if (verbose) {
+ if (adapter->mgr)
+ opae_manager_dump(adapter->mgr);
+
+ opae_adapter_for_each_acc(adapter, acc)
+ opae_accelerator_dump(acc);
+
+ if (adapter->data)
+ opae_adapter_data_dump(adapter->data);
+ }
+
+ opae_log("==========================\n");
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.h b/drivers/raw/ifpga_rawdev/base/opae_debug.h
new file mode 100644
index 00000000..a03dff92
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_DEBUG_H_
+#define _OPAE_DEBUG_H_
+
+#ifdef OPAE_HW_DEBUG
+#define opae_log(fmt, args...) printf(fmt, ## args)
+#else
+#define opae_log(fmt, args...) do {} while (0)
+#endif
+
+void opae_manager_dump(struct opae_manager *mgr);
+void opae_bridge_dump(struct opae_bridge *br);
+void opae_accelerator_dump(struct opae_accelerator *acc);
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose);
+
+#endif /* _OPAE_DEBUG_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
new file mode 100644
index 00000000..a533dfea
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+#include "ifpga_api.h"
+
+/* OPAE Bridge Functions */
+
+/**
+ * opae_bridge_alloc - alloc opae_bridge data structure
+ * @name: bridge name.
+ * @ops: ops of this bridge.
+ * @data: private data of this bridge.
+ *
+ * Return opae_bridge on success, otherwise NULL.
+ */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data)
+{
+ struct opae_bridge *br = opae_zmalloc(sizeof(*br));
+
+ if (!br)
+ return NULL;
+
+ br->name = name;
+ br->ops = ops;
+ br->data = data;
+
+ opae_log("%s %p\n", __func__, br);
+
+ return br;
+}
+
+/**
+ * opae_bridge_reset - reset opae_bridge
+ * @br: bridge to be reset.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_bridge_reset(struct opae_bridge *br)
+{
+ if (!br)
+ return -EINVAL;
+
+ if (br->ops && br->ops->reset)
+ return br->ops->reset(br);
+
+ opae_log("%s no ops\n", __func__);
+
+ return -ENOENT;
+}
+
+/* Accelerator Functions */
+
+/**
+ * opae_accelerator_alloc - alloc opae_accelerator data structure
+ * @name: accelerator name.
+ * @ops: ops of this accelerator.
+ * @data: private data of this accelerator.
+ *
+ * Return: opae_accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data)
+{
+ struct opae_accelerator *acc = opae_zmalloc(sizeof(*acc));
+
+ if (!acc)
+ return NULL;
+
+ acc->name = name;
+ acc->ops = ops;
+ acc->data = data;
+
+ opae_log("%s %p\n", __func__, acc);
+
+ return acc;
+}
+
+/**
+ * opae_acc_reg_read - read accelerator's register from its reg region.
+ * @acc: accelerator to read.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: read operation width, e.g. 4 bytes = 32-bit read.
+ * @data: data to store the value read from the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->read)
+ return acc->ops->read(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_reg_write - write to accelerator's register from its reg region.
+ * @acc: accelerator to write.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: write operation width, e.g. 4 bytes = 32-bit write.
+ * @data: data stored the value to write to the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->write)
+ return acc->ops->write(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_info - get information of an accelerator.
+ * @acc: targeted accelerator
+ * @info: accelerator info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_info)
+ return acc->ops->get_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_region_info - get information of an accelerator register region.
+ * @acc: targeted accelerator
+ * @info: accelerator region info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_region_info)
+ return acc->ops->get_region_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_set_irq - set an accelerator's irq.
+ * @acc: targeted accelerator
+ * @start: start vector number
+ * @count: count of vectors to be set from the start vector
+ * @evtfds: event fds to be notified when the corresponding irqs happen
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ if (!acc || !acc->data)
+ return -EINVAL;
+
+ if (start + count <= start)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->set_irq)
+ return acc->ops->set_irq(acc, start, count, evtfds);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_uuid - get accelerator's UUID.
+ * @acc: targeted accelerator
+ * @uuid: a pointer to UUID
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ if (!acc || !uuid)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_uuid)
+ return acc->ops->get_uuid(acc, uuid);
+
+ return -ENOENT;
+}
+
+/* Manager Functions */
+
+/**
+ * opae_manager_alloc - alloc opae_manager data structure
+ * @name: manager name.
+ * @ops: ops of this manager.
+ * @data: private data of this manager.
+ *
+ * Return: opae_manager on success, otherwise NULL.
+ */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data)
+{
+ struct opae_manager *mgr = opae_zmalloc(sizeof(*mgr));
+
+ if (!mgr)
+ return NULL;
+
+ mgr->name = name;
+ mgr->ops = ops;
+ mgr->data = data;
+
+ opae_log("%s %p\n", __func__, mgr);
+
+ return mgr;
+}
+
+/**
+ * opae_manager_flash - flash a reconfiguration image via opae_manager
+ * @mgr: opae_manager for flash.
+ * @id: id of target region (accelerator).
+ * @buf: image data buffer.
+ * @size: buffer size.
+ * @status: status to store flash result.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_manager_flash(struct opae_manager *mgr, int id, void *buf, u32 size,
+ u64 *status)
+{
+ if (!mgr)
+ return -EINVAL;
+
+ if (mgr->ops && mgr->ops->flash)
+ return mgr->ops->flash(mgr, id, buf, size, status);
+
+ return -ENOENT;
+}
+
+/* Adapter Functions */
+
+/**
+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure
+ * @type: opae_adapter_type.
+ *
+ * Return: opae_adapter_data on success, otherwise NULL.
+ */
+void *opae_adapter_data_alloc(enum opae_adapter_type type)
+{
+ struct opae_adapter_data *data;
+ int size;
+
+ switch (type) {
+ case OPAE_FPGA_PCI:
+ size = sizeof(struct opae_adapter_data_pci);
+ break;
+ case OPAE_FPGA_NET:
+ size = sizeof(struct opae_adapter_data_net);
+ break;
+ default:
+ size = sizeof(struct opae_adapter_data);
+ break;
+ }
+
+ data = opae_zmalloc(size);
+ if (!data)
+ return NULL;
+
+ data->type = type;
+
+ return data;
+}
+
+static struct opae_adapter_ops *match_ops(struct opae_adapter *adapter)
+{
+ struct opae_adapter_data *data;
+
+ if (!adapter || !adapter->data)
+ return NULL;
+
+ data = adapter->data;
+
+ if (data->type == OPAE_FPGA_PCI)
+ return &ifpga_adapter_ops;
+
+ return NULL;
+}
+
+/**
+ * opae_adapter_alloc - alloc opae_adapter data structure
+ * @name: adapter name.
+ * @data: private data of this adapter.
+ *
+ * Return: opae_adapter on success, otherwise NULL.
+ */
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data)
+{
+ struct opae_adapter *adapter = opae_zmalloc(sizeof(*adapter));
+
+ if (!adapter)
+ return NULL;
+
+ TAILQ_INIT(&adapter->acc_list);
+ adapter->data = data;
+ adapter->name = name;
+ adapter->ops = match_ops(adapter);
+
+ return adapter;
+}
+
+/**
+ * opae_adapter_enumerate - enumerate this adapter
+ * @adapter: adapter to enumerate.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_adapter_enumerate(struct opae_adapter *adapter)
+{
+ int ret = -ENOENT;
+
+ if (!adapter)
+ return -EINVAL;
+
+ if (adapter->ops && adapter->ops->enumerate)
+ ret = adapter->ops->enumerate(adapter);
+
+ if (!ret)
+ opae_adapter_dump(adapter, 1);
+
+ return ret;
+}
+
+/**
+ * opae_adapter_destroy - destroy this adapter
+ * @adapter: adapter to destroy.
+ *
+ * destroy things allocated during adapter enumeration.
+ */
+void opae_adapter_destroy(struct opae_adapter *adapter)
+{
+ if (adapter && adapter->ops && adapter->ops->destroy)
+ adapter->ops->destroy(adapter);
+}
+
+/**
+ * opae_adapter_get_acc - find and return accelerator with matched id
+ * @adapter: adapter to find the accelerator.
+ * @acc_id: id (index) of the accelerator.
+ *
+ * Return: the accelerator with the matched id on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id)
+{
+ struct opae_accelerator *acc = NULL;
+
+ if (!adapter)
+ return NULL;
+
+ opae_adapter_for_each_acc(adapter, acc)
+ if (acc->index == acc_id)
+ return acc;
+
+ return NULL;
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
new file mode 100644
index 00000000..4bbc9df5
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_HW_API_H_
+#define _OPAE_HW_API_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include "opae_osdep.h"
+
+#ifndef PCI_MAX_RESOURCE
+#define PCI_MAX_RESOURCE 6
+#endif
+
+struct opae_adapter;
+
+enum opae_adapter_type {
+ OPAE_FPGA_PCI,
+ OPAE_FPGA_NET,
+};
+
+/* OPAE Manager Data Structure */
+struct opae_manager_ops;
+
+/*
+ * opae_manager has a pointer to its parent adapter, as it may be able to manage
+ * all components on this FPGA device (adapter). If that is not the case, do not
+ * set this adapter, which limits the opae_manager ops to the manager itself.
+ */
+struct opae_manager {
+ const char *name;
+ struct opae_adapter *adapter;
+ struct opae_manager_ops *ops;
+ void *data;
+};
+
+/* FIXME: add more management ops, e.g. power/thermal */
+struct opae_manager_ops {
+ int (*flash)(struct opae_manager *mgr, int id, void *buffer,
+ u32 size, u64 *status);
+};
+
+/* OPAE Manager APIs */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data);
+#define opae_manager_free(mgr) opae_free(mgr)
+int opae_manager_flash(struct opae_manager *mgr, int acc_id, void *buf,
+ u32 size, u64 *status);
+
+/* OPAE Bridge Data Structure */
+struct opae_bridge_ops;
+
+/*
+ * opae_bridge only has pointer to its downstream accelerator.
+ */
+struct opae_bridge {
+ const char *name;
+ int id;
+ struct opae_accelerator *acc;
+ struct opae_bridge_ops *ops;
+ void *data;
+};
+
+struct opae_bridge_ops {
+ int (*reset)(struct opae_bridge *br);
+};
+
+/* OPAE Bridge APIs */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data);
+int opae_bridge_reset(struct opae_bridge *br);
+#define opae_bridge_free(br) opae_free(br)
+
+/* OPAE Accelerator Data Structure */
+struct opae_accelerator_ops;
+
+/*
+ * opae_accelerator has a pointer to its upstream bridge (port).
+ * In some cases, if the same user is allowed to do PR on its own accelerator,
+ * the manager pointer is set during enumeration. In other cases, PR can only
+ * be done via a manager in another module / thread / service / application,
+ * for better protection.
+ */
+struct opae_accelerator {
+ TAILQ_ENTRY(opae_accelerator) node;
+ const char *name;
+ int index;
+ struct opae_bridge *br;
+ struct opae_manager *mgr;
+ struct opae_accelerator_ops *ops;
+ void *data;
+};
+
+struct opae_acc_info {
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct opae_acc_region_info {
+ u32 flags;
+#define ACC_REGION_READ (1 << 0)
+#define ACC_REGION_WRITE (1 << 1)
+#define ACC_REGION_MMIO (1 << 2)
+ u32 index;
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_accelerator_ops {
+ int (*read)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*write)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*get_info)(struct opae_accelerator *acc,
+ struct opae_acc_info *info);
+ int (*get_region_info)(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+ int (*set_irq)(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+ int (*get_uuid)(struct opae_accelerator *acc,
+ struct uuid *uuid);
+};
+
+/* OPAE accelerator APIs */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data);
+#define opae_accelerator_free(acc) opae_free(acc)
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info);
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid);
+
+static inline struct opae_bridge *
+opae_acc_get_br(struct opae_accelerator *acc)
+{
+ return acc ? acc->br : NULL;
+}
+
+static inline struct opae_manager *
+opae_acc_get_mgr(struct opae_accelerator *acc)
+{
+ return acc ? acc->mgr : NULL;
+}
+
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+
+#define opae_acc_reg_read64(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 8, data)
+#define opae_acc_reg_write64(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 8, data)
+#define opae_acc_reg_read32(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 4, data)
+#define opae_acc_reg_write32(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 4, data)
+#define opae_acc_reg_read16(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 2, data)
+#define opae_acc_reg_write16(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 2, data)
+#define opae_acc_reg_read8(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 1, data)
+#define opae_acc_reg_write8(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 1, data)
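+
+/*
+ * Illustrative usage of the helpers above (not part of this patch; the
+ * region index and offset below are made-up values):
+ *
+ *   u64 csr;
+ *   if (!opae_acc_reg_read64(acc, 0, 0x100, &csr))
+ *           opae_acc_reg_write64(acc, 0, 0x100, &csr);
+ */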
+
+/* For data stream read/write */
+int opae_acc_data_read(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_data_write(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+
+/* OPAE Adapter Data Structure */
+struct opae_adapter_data {
+ enum opae_adapter_type type;
+};
+
+struct opae_reg_region {
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_adapter_data_pci {
+ enum opae_adapter_type type;
+ u16 device_id;
+ u16 vendor_id;
+ struct opae_reg_region region[PCI_MAX_RESOURCE];
+ int vfio_dev_fd; /* VFIO device file descriptor */
+};
+
+/* FIXME: OPAE_FPGA_NET type */
+struct opae_adapter_data_net {
+ enum opae_adapter_type type;
+};
+
+struct opae_adapter_ops {
+ int (*enumerate)(struct opae_adapter *adapter);
+ void (*destroy)(struct opae_adapter *adapter);
+};
+
+TAILQ_HEAD(opae_accelerator_list, opae_accelerator);
+
+#define opae_adapter_for_each_acc(adapter, acc) \
+ TAILQ_FOREACH(acc, &adapter->acc_list, node)
+
+struct opae_adapter {
+ const char *name;
+ struct opae_manager *mgr;
+ struct opae_accelerator_list acc_list;
+ struct opae_adapter_ops *ops;
+ void *data;
+};
+
+/* OPAE Adapter APIs */
+void *opae_adapter_data_alloc(enum opae_adapter_type type);
+#define opae_adapter_data_free(data) opae_free(data)
+
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data);
+#define opae_adapter_free(adapter) opae_free(adapter)
+
+int opae_adapter_enumerate(struct opae_adapter *adapter);
+void opae_adapter_destroy(struct opae_adapter *adapter);
+static inline struct opae_manager *
+opae_adapter_get_mgr(struct opae_adapter *adapter)
+{
+ return adapter ? adapter->mgr : NULL;
+}
+
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id);
+
+static inline void opae_adapter_add_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_INSERT_TAIL(&adapter->acc_list, acc, node);
+}
+
+static inline void opae_adapter_remove_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_REMOVE(&adapter->acc_list, acc, node);
+}
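+
+/*
+ * Typical adapter life cycle (illustrative only; "example" is a made-up name):
+ *
+ *   data = opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ *   adapter = opae_adapter_alloc("example", data);
+ *   if (!opae_adapter_enumerate(adapter)) {
+ *           acc = opae_adapter_get_acc(adapter, 0);
+ *           ... use the accelerator ...
+ *   }
+ *   opae_adapter_destroy(adapter);
+ *   opae_adapter_free(adapter);
+ *   opae_adapter_data_free(data);
+ */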
+#endif /* _OPAE_HW_API_H_*/
diff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
new file mode 100644
index 00000000..89c7b492
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_ifpga_hw_api.h"
+#include "ifpga_api.h"
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_get_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data || !fme_info)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ spinlock_lock(&fme->lock);
+ fme_info->capability = fme->capability;
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_irq(fme->parent, FEATURE_FIU_ID_FME, 0,
+ IFPGA_FME_FEATURE_ID_GLOBAL_ERR, err_irq_set);
+}
+
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_get_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !port_info)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ port_info->capability = port->capability;
+ port_info->num_uafu_irqs = port->num_uafu_irqs;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !info)
+ return -EINVAL;
+
+ /* Only support STP region now */
+ if (info->index != PORT_REGION_INDEX_STP)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ info->addr = port->stp_addr;
+ info->size = port->stp_size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_ERROR, err_irq_set);
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
new file mode 100644
index 00000000..4c2c9904
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_IFPGA_HW_API_H_
+#define _OPAE_IFPGA_HW_API_H_
+
+#include "opae_hw_api.h"
+
+/**
+ * struct feature_prop - data structure for feature property
+ * @feature_id: id of this feature.
+ * @prop_id: id of this property under this feature.
+ * @data: property value to set/get.
+ */
+struct feature_prop {
+ u64 feature_id;
+ u64 prop_id;
+ u64 data;
+};
+
+#define IFPGA_FIU_ID_FME 0x0
+#define IFPGA_FIU_ID_PORT 0x1
+
+#define IFPGA_FME_FEATURE_ID_HEADER 0x0
+#define IFPGA_FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define IFPGA_FME_FEATURE_ID_POWER_MGMT 0x2
+#define IFPGA_FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define IFPGA_FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define IFPGA_FME_FEATURE_ID_PR_MGMT 0x5
+#define IFPGA_FME_FEATURE_ID_HSSI 0x6
+#define IFPGA_FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define IFPGA_PORT_FEATURE_ID_HEADER 0x0
+#define IFPGA_PORT_FEATURE_ID_AFU 0xff
+#define IFPGA_PORT_FEATURE_ID_ERROR 0x10
+#define IFPGA_PORT_FEATURE_ID_UMSG 0x11
+#define IFPGA_PORT_FEATURE_ID_UINT 0x12
+#define IFPGA_PORT_FEATURE_ID_STP 0x13
+
+/*
+ * PROP format (TOP + SUB + ID)
+ *
+ * (~0x0) means this field is unused.
+ */
+#define PROP_TOP GENMASK(31, 24)
+#define PROP_TOP_UNUSED 0xff
+#define PROP_SUB GENMASK(23, 16)
+#define PROP_SUB_UNUSED 0xff
+#define PROP_ID GENMASK(15, 0)
+
+#define PROP(_top, _sub, _id) \
+ (SET_FIELD(PROP_TOP, _top) | SET_FIELD(PROP_SUB, _sub) |\
+ SET_FIELD(PROP_ID, _id))
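+
+/*
+ * For example (illustrative): PERF_PROP_CACHE(0x2), defined below as
+ * PROP(PERF_PROP_TOP_CACHE, 0xff, 0x2), packs to 0x01ff0002
+ * (top = 0x1, sub unused, id = 0x2).
+ */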
+
+/* FME header feature's properties */
+#define FME_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define FME_HDR_PROP_PORTS_NUM 0x2 /* RDONLY */
+#define FME_HDR_PROP_CACHE_SIZE 0x3 /* RDONLY */
+#define FME_HDR_PROP_VERSION 0x4 /* RDONLY */
+#define FME_HDR_PROP_SOCKET_ID 0x5 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_ID 0x6 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_METADATA 0x7 /* RDONLY */
+
+/* FME error reporting feature's properties */
+/* FME error reporting properties format */
+#define ERR_PROP(_top, _id) PROP(_top, 0xff, _id)
+#define ERR_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define ERR_PROP_TOP_FME_ERR 0x1
+#define ERR_PROP_ROOT(_id) ERR_PROP(0xff, _id)
+#define ERR_PROP_FME_ERR(_id) ERR_PROP(ERR_PROP_TOP_FME_ERR, _id)
+
+#define FME_ERR_PROP_ERRORS ERR_PROP_FME_ERR(0x1)
+#define FME_ERR_PROP_FIRST_ERROR ERR_PROP_FME_ERR(0x2)
+#define FME_ERR_PROP_NEXT_ERROR ERR_PROP_FME_ERR(0x3)
+#define FME_ERR_PROP_CLEAR ERR_PROP_FME_ERR(0x4) /* WO */
+#define FME_ERR_PROP_REVISION ERR_PROP_ROOT(0x5)
+#define FME_ERR_PROP_PCIE0_ERRORS ERR_PROP_ROOT(0x6) /* RW */
+#define FME_ERR_PROP_PCIE1_ERRORS ERR_PROP_ROOT(0x7) /* RW */
+#define FME_ERR_PROP_NONFATAL_ERRORS ERR_PROP_ROOT(0x8)
+#define FME_ERR_PROP_CATFATAL_ERRORS ERR_PROP_ROOT(0x9)
+#define FME_ERR_PROP_INJECT_ERRORS ERR_PROP_ROOT(0xa) /* RW */
+
+/* FME thermal feature's properties */
+#define FME_THERMAL_PROP_THRESHOLD1 0x1 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD2 0x2 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD_TRIP 0x3 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_REACHED 0x4 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD2_REACHED 0x5 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_POLICY 0x6 /* RW */
+#define FME_THERMAL_PROP_TEMPERATURE 0x7 /* RDONLY */
+#define FME_THERMAL_PROP_REVISION 0x8 /* RDONLY */
+
+/* FME power feature's properties */
+#define FME_PWR_PROP_CONSUMED 0x1 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD1 0x2 /* RW */
+#define FME_PWR_PROP_THRESHOLD2 0x3 /* RW */
+#define FME_PWR_PROP_THRESHOLD1_STATUS 0x4 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD2_STATUS 0x5 /* RDONLY */
+#define FME_PWR_PROP_RTL 0x6 /* RDONLY */
+#define FME_PWR_PROP_XEON_LIMIT 0x7 /* RDONLY */
+#define FME_PWR_PROP_FPGA_LIMIT 0x8 /* RDONLY */
+#define FME_PWR_PROP_REVISION 0x9 /* RDONLY */
+
+/* FME iperf/dperf PROP format */
+#define PERF_PROP_TOP_CACHE 0x1
+#define PERF_PROP_TOP_VTD 0x2
+#define PERF_PROP_TOP_FAB 0x3
+#define PERF_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define PERF_PROP_SUB_UNUSED PROP_SUB_UNUSED
+
+#define PERF_PROP_ROOT(_id) PROP(0xff, 0xff, _id)
+#define PERF_PROP_CACHE(_id) PROP(PERF_PROP_TOP_CACHE, 0xff, _id)
+#define PERF_PROP_VTD(_sub, _id) PROP(PERF_PROP_TOP_VTD, _sub, _id)
+#define PERF_PROP_VTD_ROOT(_id) PROP(PERF_PROP_TOP_VTD, 0xff, _id)
+#define PERF_PROP_FAB(_sub, _id) PROP(PERF_PROP_TOP_FAB, _sub, _id)
+#define PERF_PROP_FAB_ROOT(_id) PROP(PERF_PROP_TOP_FAB, 0xff, _id)
+
+/* FME iperf feature's properties */
+#define FME_IPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_IPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* iperf CACHE properties */
+#define FME_IPERF_PROP_CACHE_FREEZE PERF_PROP_CACHE(0x1) /* RW */
+#define FME_IPERF_PROP_CACHE_READ_HIT PERF_PROP_CACHE(0x2)
+#define FME_IPERF_PROP_CACHE_READ_MISS PERF_PROP_CACHE(0x3)
+#define FME_IPERF_PROP_CACHE_WRITE_HIT PERF_PROP_CACHE(0x4)
+#define FME_IPERF_PROP_CACHE_WRITE_MISS PERF_PROP_CACHE(0x5)
+#define FME_IPERF_PROP_CACHE_HOLD_REQUEST PERF_PROP_CACHE(0x6)
+#define FME_IPERF_PROP_CACHE_TX_REQ_STALL PERF_PROP_CACHE(0x7)
+#define FME_IPERF_PROP_CACHE_RX_REQ_STALL PERF_PROP_CACHE(0x8)
+#define FME_IPERF_PROP_CACHE_RX_EVICTION PERF_PROP_CACHE(0x9)
+#define FME_IPERF_PROP_CACHE_DATA_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xa)
+#define FME_IPERF_PROP_CACHE_TAG_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xb)
+/* iperf VTD properties */
+#define FME_IPERF_PROP_VTD_FREEZE PERF_PROP_VTD_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_HIT PERF_PROP_VTD_ROOT(0x2)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_HIT PERF_PROP_VTD_ROOT(0x3)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_HIT PERF_PROP_VTD_ROOT(0x4)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_HIT PERF_PROP_VTD_ROOT(0x5)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_HIT PERF_PROP_VTD_ROOT(0x6)
+#define FME_IPERF_PROP_VTD_SIP_RCC_HIT PERF_PROP_VTD_ROOT(0x7)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_MISS PERF_PROP_VTD_ROOT(0x8)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_MISS PERF_PROP_VTD_ROOT(0x9)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_MISS PERF_PROP_VTD_ROOT(0xa)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_MISS PERF_PROP_VTD_ROOT(0xb)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_MISS PERF_PROP_VTD_ROOT(0xc)
+#define FME_IPERF_PROP_VTD_SIP_RCC_MISS PERF_PROP_VTD_ROOT(0xd)
+#define FME_IPERF_PROP_VTD_PORT_READ_TRANSACTION(n) PERF_PROP_VTD(n, 0xe)
+#define FME_IPERF_PROP_VTD_PORT_WRITE_TRANSACTION(n) PERF_PROP_VTD(n, 0xf)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_READ_HIT(n) PERF_PROP_VTD(n, 0x10)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_WRITE_HIT(n) PERF_PROP_VTD(n, 0x11)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_4K_FILL(n) PERF_PROP_VTD(n, 0x12)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_2M_FILL(n) PERF_PROP_VTD(n, 0x13)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_1G_FILL(n) PERF_PROP_VTD(n, 0x14)
+/* iperf FAB properties */
+#define FME_IPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_IPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_IPERF_PROP_FAB_PCIE1_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_IPERF_PROP_FAB_PCIE1_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_IPERF_PROP_FAB_UPI_READ PERF_PROP_FAB_ROOT(0x6)
+#define FME_IPERF_PROP_FAB_PORT_UPI_READ(n) PERF_PROP_FAB(n, 0x6)
+#define FME_IPERF_PROP_FAB_UPI_WRITE PERF_PROP_FAB_ROOT(0x7)
+#define FME_IPERF_PROP_FAB_PORT_UPI_WRITE(n) PERF_PROP_FAB(n, 0x7)
+#define FME_IPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x8)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x8)
+#define FME_IPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x9)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x9)
+#define FME_IPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0xa) /* RW */
+#define FME_IPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0xa) /* RW */
+
+/* FME dperf properties */
+#define FME_DPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_DPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* dperf FAB properties */
+#define FME_DPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_DPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_DPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_DPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_DPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_DPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0x6) /* RW */
+#define FME_DPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0x6) /* RW */
+
+/* PORT header feature's properties */
+#define PORT_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_HDR_PROP_PORTIDX 0x2 /* RDONLY */
+#define PORT_HDR_PROP_LATENCY_TOLERANCE 0x3 /* RDONLY */
+#define PORT_HDR_PROP_AP1_EVENT 0x4 /* RW */
+#define PORT_HDR_PROP_AP2_EVENT 0x5 /* RW */
+#define PORT_HDR_PROP_POWER_STATE 0x6 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_FREQCMD 0x7 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQCNTRCMD 0x8 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQSTS 0x9 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_CNTRSTS 0xa /* RDONLY */
+
+/* PORT error feature's properties */
+#define PORT_ERR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_ERR_PROP_ERRORS 0x2 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_ERROR 0x3 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB 0x4 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB 0x5 /* RDONLY */
+#define PORT_ERR_PROP_CLEAR 0x6 /* WRONLY */
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
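+
+/*
+ * Illustrative example (not part of this patch): reading the number of ports
+ * reported by the FME header feature; "mgr" and "ports_num" are assumed names.
+ *
+ *   struct feature_prop prop = {
+ *           .feature_id = IFPGA_FME_FEATURE_ID_HEADER,
+ *           .prop_id = FME_HDR_PROP_PORTS_NUM,
+ *   };
+ *
+ *   if (!opae_manager_ifpga_get_prop(mgr, &prop))
+ *           ports_num = prop.data;
+ */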
+
+/*
+ * Retrieve information about the fpga fme.
+ * Driver fills the info in provided struct fpga_fme_info.
+ */
+struct fpga_fme_info {
+ u32 capability; /* The capability of FME device */
+#define FPGA_FME_CAP_ERR_IRQ (1 << 0) /* Support fme error interrupt */
+};
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info);
+
+/* Set eventfd information for ifpga FME error interrupt */
+struct fpga_fme_err_irq_set {
+ s32 evtfd; /* Eventfd handler */
+};
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set);
+
+/*
+ * Retrieve information about the fpga port.
+ * Driver fills the info in provided struct fpga_port_info.
+ */
+struct fpga_port_info {
+ u32 capability; /* The capability of port device */
+#define FPGA_PORT_CAP_ERR_IRQ (1 << 0) /* Support port error interrupt */
+#define FPGA_PORT_CAP_UAFU_IRQ (1 << 1) /* Support uafu error interrupt */
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+};
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info);
+/*
+ * Retrieve region information about the fpga port.
+ * Driver needs to fill the index of struct fpga_port_region_info.
+ */
+struct fpga_port_region_info {
+ u32 index;
+#define PORT_REGION_INDEX_STP (1 << 1) /* Signal Tap Region */
+ u64 size; /* Region Size */
+ u8 *addr; /* Base address of the region */
+};
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info);
+
+/* Set eventfd information for ifpga port error interrupt */
+struct fpga_port_err_irq_set {
+ s32 evtfd; /* Eventfd handler */
+};
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set);
+
+#endif /* _OPAE_IFPGA_HW_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_osdep.h b/drivers/raw/ifpga_rawdev/base/opae_osdep.h
new file mode 100644
index 00000000..90f54f77
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_osdep.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_OSDEP_H
+#define _OPAE_OSDEP_H
+
+#include <string.h>
+#include <stdbool.h>
+
+#ifdef RTE_LIBRTE_EAL
+#include "osdep_rte/osdep_generic.h"
+#else
+#include "osdep_raw/osdep_generic.h"
+#endif
+
+#define __iomem
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+
+struct uuid {
+ u8 b[16];
+};
+
+#ifndef LINUX_MACROS
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#ifndef GENMASK
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif /* GENMASK */
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif /* GENMASK_ULL */
+#endif /* LINUX_MACROS */
+
+#define SET_FIELD(m, v) (((v) << (__builtin_ffsll(m) - 1)) & (m))
+#define GET_FIELD(m, v) (((v) & (m)) >> (__builtin_ffsll(m) - 1))
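+
+/*
+ * E.g. (illustrative) SET_FIELD(GENMASK(15, 8), 0xab) yields 0xab00 and
+ * GET_FIELD(GENMASK(15, 8), 0xab00) yields 0xab.
+ */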
+
+#define dev_err(x, args...) dev_printf(ERR, args)
+#define dev_info(x, args...) dev_printf(INFO, args)
+#define dev_warn(x, args...) dev_printf(WARNING, args)
+
+#ifdef OPAE_DEBUG
+#define dev_debug(x, args...) dev_printf(DEBUG, args)
+#else
+#define dev_debug(x, args...) do { } while (0)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warn(0, y, ##args)
+#define pr_info(y, args...) dev_info(0, y, ##args)
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+ int ret = !!(x); \
+ if (unlikely(ret)) \
+ pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define udelay(x) opae_udelay(x)
+#define msleep(x) opae_udelay(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
new file mode 100644
index 00000000..895a1d82
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RAW_GENERIC_H
+#define _OSDEP_RAW_GENERIC_H
+
+#define compiler_barrier() (asm volatile ("" : : : "memory"))
+
+#define io_wmb() compiler_barrier()
+#define io_rmb() compiler_barrier()
+
+static inline uint8_t opae_readb(const volatile void *addr)
+{
+ uint8_t val;
+
+ val = *(const volatile uint8_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint16_t opae_readw(const volatile void *addr)
+{
+ uint16_t val;
+
+ val = *(const volatile uint16_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint32_t opae_readl(const volatile void *addr)
+{
+ uint32_t val;
+
+ val = *(const volatile uint32_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint64_t opae_readq(const volatile void *addr)
+{
+ uint64_t val;
+
+ val = *(const volatile uint64_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline void opae_writeb(uint8_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint8_t *)addr = value;
+}
+
+static inline void opae_writew(uint16_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint16_t *)addr = value;
+}
+
+static inline void opae_writel(uint32_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint32_t *)addr = value;
+}
+
+static inline void opae_writeq(uint64_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint64_t *)addr = value;
+}
+
+#define opae_free(addr) free(addr)
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
new file mode 100644
index 00000000..76902e28
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RTE_GENERIC_H
+#define _OSDEP_RTE_GENERIC_H
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+
+#define dev_printf(level, fmt, args...) \
+ RTE_LOG(level, PMD, "osdep_rte: " fmt, ## args)
+
+#define osdep_panic(...) rte_panic(__VA_ARGS__)
+
+#define opae_udelay(x) rte_delay_us(x)
+
+#define opae_readb(addr) rte_read8(addr)
+#define opae_readw(addr) rte_read16(addr)
+#define opae_readl(addr) rte_read32(addr)
+#define opae_readq(addr) rte_read64(addr)
+#define opae_writeb(value, addr) rte_write8(value, addr)
+#define opae_writew(value, addr) rte_write16(value, addr)
+#define opae_writel(value, addr) rte_write32(value, addr)
+#define opae_writeq(value, addr) rte_write64(value, addr)
+
+#define opae_malloc(size) rte_malloc(NULL, size, 0)
+#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)
+#define opae_free(addr) rte_free(addr)
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define spinlock_t rte_spinlock_t
+#define spinlock_init(x) rte_spinlock_init(x)
+#define spinlock_lock(x) rte_spinlock_lock(x)
+#define spinlock_unlock(x) rte_spinlock_unlock(x)
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
new file mode 100644
index 00000000..3fed0578
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
@@ -0,0 +1,617 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+
+#include <rte_errno.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+
+#include "base/opae_hw_api.h"
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_common.h"
+#include "ifpga_logs.h"
+#include "ifpga_rawdev.h"
+
+int ifpga_rawdev_logtype;
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+#define RTE_MAX_RAW_DEVICE 10
+
+static const struct rte_pci_id pci_ifpga_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static int
+ifpga_fill_afu_dev(struct opae_accelerator *acc,
+ struct rte_afu_device *afu_dev)
+{
+ struct rte_mem_resource *res = afu_dev->mem_resource;
+ struct opae_acc_region_info region_info;
+ struct opae_acc_info info;
+ unsigned long i;
+ int ret;
+
+ ret = opae_acc_get_info(acc, &info);
+ if (ret)
+ return ret;
+
+ if (info.num_regions > PCI_MAX_RESOURCE)
+ return -EFAULT;
+
+ afu_dev->num_region = info.num_regions;
+
+ for (i = 0; i < info.num_regions; i++) {
+ region_info.index = i;
+ ret = opae_acc_get_region_info(acc, &region_info);
+ if (ret)
+ return ret;
+
+ if ((region_info.flags & ACC_REGION_MMIO) &&
+ (region_info.flags & ACC_REGION_READ) &&
+ (region_info.flags & ACC_REGION_WRITE)) {
+ res[i].phys_addr = region_info.phys_addr;
+ res[i].len = region_info.len;
+ res[i].addr = region_info.addr;
+ } else
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void
+ifpga_rawdev_info_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t dev_info)
+{
+ struct opae_adapter *adapter;
+ struct opae_accelerator *acc;
+ struct rte_afu_device *afu_dev;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ if (!dev_info) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid request");
+ return;
+ }
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return;
+
+ afu_dev = dev_info;
+ afu_dev->rawdev = dev;
+
+ /* find opae_accelerator and fill info into afu_device */
+ opae_adapter_for_each_acc(adapter, acc) {
+ if (acc->index != afu_dev->id.port)
+ continue;
+
+ if (ifpga_fill_afu_dev(acc, afu_dev)) {
+ IFPGA_RAWDEV_PMD_ERR("cannot get info\n");
+ return;
+ }
+ }
+}
+
+static int
+ifpga_rawdev_configure(const struct rte_rawdev *dev,
+ rte_rawdev_obj_t config)
+{
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ return config ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_start(struct rte_rawdev *dev)
+{
+ int ret = 0;
+ struct opae_adapter *adapter;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ return ret;
+}
+
+static void
+ifpga_rawdev_stop(struct rte_rawdev *dev)
+{
+ dev->started = 0;
+}
+
+static int
+ifpga_rawdev_close(struct rte_rawdev *dev)
+{
+ return dev ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_reset(struct rte_rawdev *dev)
+{
+ return dev ? 0 : 1;
+}
+
+static int
+fpga_pr(struct rte_rawdev *raw_dev, u32 port_id, u64 *buffer, u32 size,
+ u64 *status)
+{
+
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_accelerator *acc;
+ struct opae_bridge *br;
+ int ret;
+
+ adapter = ifpga_rawdev_get_priv(raw_dev);
+ if (!adapter)
+ return -ENODEV;
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr)
+ return -ENODEV;
+
+ acc = opae_adapter_get_acc(adapter, port_id);
+ if (!acc)
+ return -ENODEV;
+
+ br = opae_acc_get_br(acc);
+ if (!br)
+ return -ENODEV;
+
+ ret = opae_manager_flash(mgr, port_id, buffer, size, status);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s pr error %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = opae_bridge_reset(br);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s reset port:%d error %d\n",
+ __func__, port_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id,
+ const char *file_name)
+{
+ struct stat file_stat;
+ int file_fd;
+ int ret = 0;
+ ssize_t buffer_size;
+ void *buffer;
+ u64 pr_error;
+
+ if (!file_name)
+ return -EINVAL;
+
+ file_fd = open(file_name, O_RDONLY);
+ if (file_fd < 0) {
+ IFPGA_RAWDEV_PMD_ERR("%s: open file error: %s\n",
+ __func__, file_name);
+ IFPGA_RAWDEV_PMD_ERR("Message : %s\n", strerror(errno));
+ return -EINVAL;
+ }
+ ret = stat(file_name, &file_stat);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("stat on bitstream file failed: %s\n",
+ file_name);
+ ret = -EINVAL;
+ goto close_fd;
+ }
+ buffer_size = file_stat.st_size;
+ IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zu\n", buffer_size);
+ buffer = rte_malloc(NULL, buffer_size, 0);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto close_fd;
+ }
+
+ /* read the raw data */
+ if (buffer_size != read(file_fd, (void *)buffer, buffer_size)) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+ /* do PR now */
+ ret = fpga_pr(rawdev, port_id, buffer, buffer_size, &pr_error);
+ IFPGA_RAWDEV_PMD_INFO("downloading to device port %d....%s.\n", port_id,
+ ret ? "failed" : "success");
+ if (ret) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+free_buffer:
+ if (buffer)
+ rte_free(buffer);
+close_fd:
+ close(file_fd);
+ file_fd = 0;
+ return ret;
+}
+
+static int
+ifpga_rawdev_pr(struct rte_rawdev *dev,
+ rte_rawdev_obj_t pr_conf)
+{
+ struct opae_adapter *adapter;
+ struct rte_afu_pr_conf *afu_pr_conf;
+ int ret;
+ struct uuid uuid;
+ struct opae_accelerator *acc;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ if (!pr_conf)
+ return -EINVAL;
+
+ afu_pr_conf = pr_conf;
+
+ if (afu_pr_conf->pr_enable) {
+ ret = rte_fpga_do_pr(dev,
+ afu_pr_conf->afu_id.port,
+ afu_pr_conf->bs_path);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("do pr error %d\n", ret);
+ return ret;
+ }
+ }
+
+ acc = opae_adapter_get_acc(adapter, afu_pr_conf->afu_id.port);
+ if (!acc)
+ return -ENODEV;
+
+ ret = opae_acc_get_uuid(acc, &uuid);
+ if (ret)
+ return ret;
+
+ memcpy(&afu_pr_conf->afu_id.uuid.uuid_low, uuid.b, sizeof(u64));
+ memcpy(&afu_pr_conf->afu_id.uuid.uuid_high, uuid.b + 8, sizeof(u64));
+
+ IFPGA_RAWDEV_PMD_INFO("%s: uuid_l=0x%lx, uuid_h=0x%lx\n", __func__,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_low,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_high);
+
+ return 0;
+}
+
+static const struct rte_rawdev_ops ifpga_rawdev_ops = {
+ .dev_info_get = ifpga_rawdev_info_get,
+ .dev_configure = ifpga_rawdev_configure,
+ .dev_start = ifpga_rawdev_start,
+ .dev_stop = ifpga_rawdev_stop,
+ .dev_close = ifpga_rawdev_close,
+ .dev_reset = ifpga_rawdev_reset,
+
+ .queue_def_conf = NULL,
+ .queue_setup = NULL,
+ .queue_release = NULL,
+
+ .attr_get = NULL,
+ .attr_set = NULL,
+
+ .enqueue_bufs = NULL,
+ .dequeue_bufs = NULL,
+
+ .dump = NULL,
+
+ .xstats_get = NULL,
+ .xstats_get_names = NULL,
+ .xstats_get_by_name = NULL,
+ .xstats_reset = NULL,
+
+ .firmware_status_get = NULL,
+ .firmware_version_get = NULL,
+ .firmware_load = ifpga_rawdev_pr,
+ .firmware_unload = NULL,
+
+ .dev_selftest = NULL,
+};
+
+static int
+ifpga_rawdev_create(struct rte_pci_device *pci_dev,
+ int socket_id)
+{
+ int ret = 0;
+ struct rte_rawdev *rawdev = NULL;
+ struct opae_adapter *adapter = NULL;
+ struct opae_manager *mgr = NULL;
+ struct opae_adapter_data_pci *data = NULL;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ int i;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct opae_adapter),
+ socket_id);
+ if (rawdev == NULL) {
+ IFPGA_RAWDEV_PMD_ERR("Unable to allocate rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* alloc OPAE_FPGA_PCI data to register to OPAE hardware level API */
+ data = opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ if (!data) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* init opae_adapter_data_pci for device specific information */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ data->region[i].phys_addr = pci_dev->mem_resource[i].phys_addr;
+ data->region[i].len = pci_dev->mem_resource[i].len;
+ data->region[i].addr = pci_dev->mem_resource[i].addr;
+ }
+ data->device_id = pci_dev->id.device_id;
+ data->vendor_id = pci_dev->id.vendor_id;
+
+ /* create an opae_adapter based on the above device data */
+ adapter = opae_adapter_alloc(pci_dev->device.name, data);
+ if (!adapter) {
+ ret = -ENOMEM;
+ goto free_adapter_data;
+ }
+
+ rawdev->dev_ops = &ifpga_rawdev_ops;
+ rawdev->device = &pci_dev->device;
+ rawdev->driver_name = pci_dev->device.driver->name;
+
+ rawdev->dev_private = adapter;
+
+ /* must enumerate the adapter before using it */
+ ret = opae_adapter_enumerate(adapter);
+ if (ret)
+ goto free_adapter;
+
+ /* get opae_manager to rawdev */
+ mgr = opae_adapter_get_mgr(adapter);
+ if (mgr) {
+ /* PF function */
+ IFPGA_RAWDEV_PMD_INFO("this is a PF function");
+ }
+
+ return ret;
+
+free_adapter:
+ if (adapter)
+ opae_adapter_free(adapter);
+free_adapter_data:
+ if (data)
+ opae_adapter_data_free(data);
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_destroy(struct rte_pci_device *pci_dev)
+{
+ int ret;
+ struct rte_rawdev *rawdev;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct opae_adapter *adapter;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Closing %s on NUMA node %d",
+ name, rte_socket_id());
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rawdev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ adapter = ifpga_rawdev_get_priv(rawdev);
+ if (!adapter)
+ return -ENODEV;
+
+ opae_adapter_data_free(adapter->data);
+ opae_adapter_free(adapter);
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ IFPGA_RAWDEV_PMD_DEBUG("Device cleanup failed");
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+ return ifpga_rawdev_create(pci_dev, rte_socket_id());
+}
+
+static int
+ifpga_rawdev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return ifpga_rawdev_destroy(pci_dev);
+}
+
+static struct rte_pci_driver rte_ifpga_rawdev_pmd = {
+ .id_table = pci_ifpga_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ifpga_rawdev_pci_probe,
+ .remove = ifpga_rawdev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(ifpga_rawdev_pci_driver, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ifpga_rawdev_init_log)
+{
+ ifpga_rawdev_logtype = rte_log_register("driver.raw.init");
+ if (ifpga_rawdev_logtype >= 0)
+ rte_log_set_level(ifpga_rawdev_logtype, RTE_LOG_NOTICE);
+}
+
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
+static int
+ifpga_cfg_probe(struct rte_vdev_device *dev)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ int port;
+ char *name = NULL;
+ char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
+
+ devargs = dev->device.devargs;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_RAWDEV_PMD_LOG(ERR, "error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &rte_ifpga_get_string_arg, &name) < 0) {
+ IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist,
+ IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg,
+ &port) < 0) {
+ IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
+ port, name);
+
+ rte_eal_hotplug_add(RTE_STR(IFPGA_BUS_NAME),
+ dev_name, devargs->args);
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return 0;
+}
+
+static int
+ifpga_cfg_remove(struct rte_vdev_device *vdev)
+{
+ IFPGA_RAWDEV_PMD_INFO("Remove ifpga_cfg %p",
+ vdev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver ifpga_cfg_driver = {
+ .probe = ifpga_cfg_probe,
+ .remove = ifpga_cfg_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(ifpga_rawdev_cfg, ifpga_cfg_driver);
+RTE_PMD_REGISTER_ALIAS(ifpga_rawdev_cfg, ifpga_cfg);
+RTE_PMD_REGISTER_PARAM_STRING(ifpga_rawdev_cfg,
+ "ifpga=<string> "
+ "port=<int> "
+ "afu_bts=<path>");
+
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.h b/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
new file mode 100644
index 00000000..c7759b8b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_RAWDEV_H_
+#define _IFPGA_RAWDEV_H_
+
+extern int ifpga_rawdev_logtype;
+
+#define IFPGA_RAWDEV_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_rawdev_logtype, "ifpga: " fmt, \
+ ##args)
+
+#define IFPGA_RAWDEV_PMD_FUNC_TRACE() IFPGA_RAWDEV_PMD_LOG(DEBUG, ">>")
+
+#define IFPGA_RAWDEV_PMD_DEBUG(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(DEBUG, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_INFO(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(INFO, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_ERR(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(ERR, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_WARN(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(WARNING, fmt, ## args)
+
+enum ifpga_rawdev_device_state {
+ IFPGA_IDLE,
+ IFPGA_READY,
+ IFPGA_ERROR
+};
+
+static inline struct opae_adapter *
+ifpga_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+ return rawdev->dev_private;
+}
+
+#endif /* _IFPGA_RAWDEV_H_ */
diff --git a/drivers/raw/ifpga_rawdev/meson.build b/drivers/raw/ifpga_rawdev/meson.build
new file mode 100644
index 00000000..67256872
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+version = 1
+
+subdir('base')
+objs = [base_objs]
+
+deps += ['rawdev', 'pci', 'bus_pci', 'kvargs',
+ 'bus_vdev', 'bus_ifpga']
+sources = files('ifpga_rawdev.c')
+
+includes += include_directories('base')
+
+allow_experimental_apis = true
diff --git a/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map b/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build
new file mode 100644
index 00000000..a61cdcce
--- /dev/null
+++ b/drivers/raw/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+drivers = ['skeleton_rawdev', 'dpaa2_cmdif', 'dpaa2_qdma', 'ifpga_rawdev']
+std_deps = ['rawdev']
+config_flag_fmt = 'RTE_LIBRTE_PMD_@0@_RAWDEV'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/raw/skeleton_rawdev/Makefile b/drivers/raw/skeleton_rawdev/Makefile
index bacc66dd..3f97c2ee 100644
--- a/drivers/raw/skeleton_rawdev/Makefile
+++ b/drivers/raw/skeleton_rawdev/Makefile
@@ -8,7 +8,6 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_skeleton_rawdev.a
-CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal
diff --git a/drivers/raw/skeleton_rawdev/meson.build b/drivers/raw/skeleton_rawdev/meson.build
new file mode 100644
index 00000000..b4a6ed08
--- /dev/null
+++ b/drivers/raw/skeleton_rawdev/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+deps += ['rawdev', 'kvargs', 'mbuf', 'bus_vdev']
+sources = files('skeleton_rawdev.c',
+ 'skeleton_rawdev_test.c')
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
index 6bdbbb50..6518a2d9 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
@@ -305,6 +305,18 @@ static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
return ret;
}
+static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ return skeldev->num_queues;
+}
+
static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
const char *attr_name,
uint64_t *attr_value)
@@ -524,6 +536,7 @@ static const struct rte_rawdev_ops skeleton_rawdev_ops = {
.queue_def_conf = skeleton_rawdev_queue_def_conf,
.queue_setup = skeleton_rawdev_queue_setup,
.queue_release = skeleton_rawdev_queue_release,
+ .queue_count = skeleton_rawdev_queue_count,
.attr_get = skeleton_rawdev_get_attr,
.attr_set = skeleton_rawdev_set_attr,
@@ -744,10 +757,7 @@ static struct rte_vdev_driver skeleton_pmd_drv = {
RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);
-RTE_INIT(skeleton_pmd_init_log);
-
-static void
-skeleton_pmd_init_log(void)
+RTE_INIT(skeleton_pmd_init_log)
{
skeleton_pmd_logtype = rte_log_register("rawdev.skeleton");
if (skeleton_pmd_logtype >= 0)
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
index 795f24bc..3405b898 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
@@ -194,6 +194,18 @@ test_rawdev_queue_default_conf_get(void)
}
static int
+test_rawdev_queue_count(void)
+{
+ unsigned int q_count;
+
+ /* Get the current configuration */
+ q_count = rte_rawdev_queue_count(TEST_DEV_ID);
+ RTE_TEST_ASSERT_EQUAL(q_count, 1, "Invalid queue count (%d)", q_count);
+
+ return TEST_SUCCESS;
+}
+
+static int
test_rawdev_queue_setup(void)
{
int ret;
@@ -288,6 +300,7 @@ test_rawdev_attr_set_get(void)
"Attribute (Test2) not set correctly (%" PRIu64 ")",
ret_value);
+ free(dummy_value);
return TEST_SUCCESS;
}
@@ -379,8 +392,6 @@ test_rawdev_enqdeq(void)
cleanup:
if (buffers[0].buf_addr)
free(buffers[0].buf_addr);
- if (deq_buffers)
- free(deq_buffers);
return TEST_FAILED;
}
@@ -430,6 +441,7 @@ test_rawdev_skeldev(void)
SKELDEV_TEST_RUN(test_rawdev_configure, NULL,
test_rawdev_queue_default_conf_get);
SKELDEV_TEST_RUN(test_rawdev_configure, NULL, test_rawdev_queue_setup);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_queue_count);
SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL,
test_rawdev_queue_release);
SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_attr_set_get);