author    Luca Boccassi <luca.boccassi@gmail.com>  2018-11-01 11:59:50 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-11-01 12:00:19 +0000
commit    8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82 (patch)
tree      208e3bc33c220854d89d010e3abf720a2e62e546 /drivers/bus
parent    b63264c8342e6a1b6971c79550d2af2024b6a4de (diff)

New upstream version 18.11-rc1 (tag: upstream/18.11-rc1)

Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/dpaa/Makefile | 4
-rw-r--r--  drivers/bus/dpaa/base/fman/netcfg_layer.c | 2
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman_driver.c | 17
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman.c | 72
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman_driver.c | 7
-rw-r--r--  drivers/bus/dpaa/dpaa_bus.c | 19
-rw-r--r--  drivers/bus/dpaa/include/compat.h | 20
-rw-r--r--  drivers/bus/dpaa/include/fsl_fman_crc64.h | 8
-rw-r--r--  drivers/bus/dpaa/include/fsl_qman.h | 20
-rw-r--r--  drivers/bus/dpaa/include/fsl_usd.h | 6
-rw-r--r--  drivers/bus/dpaa/meson.build | 5
-rw-r--r--  drivers/bus/dpaa/rte_bus_dpaa_version.map | 16
-rw-r--r--  drivers/bus/dpaa/rte_dpaa_bus.h | 6
-rw-r--r--  drivers/bus/fslmc/Makefile | 3
-rw-r--r--  drivers/bus/fslmc/fslmc_bus.c | 35
-rw-r--r--  drivers/bus/fslmc/fslmc_vfio.c | 13
-rw-r--r--  drivers/bus/fslmc/mc/dpbp.c | 10
-rw-r--r--  drivers/bus/fslmc/mc/dpci.c | 197
-rw-r--r--  drivers/bus/fslmc/mc/dpcon.c | 30
-rw-r--r--  drivers/bus/fslmc/mc/dpdmai.c | 14
-rw-r--r--  drivers/bus/fslmc/mc/dpio.c | 9
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpbp.h | 1
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpbp_cmd.h | 16
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpci.h | 47
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpci_cmd.h | 62
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpcon.h | 19
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpdmai.h | 5
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h | 20
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpmng.h | 2
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpopr.h | 85
-rw-r--r--  drivers/bus/fslmc/meson.build | 5
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c | 7
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 197
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpio.h | 4
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 53
-rw-r--r--  drivers/bus/fslmc/qbman/include/compat.h | 3
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h | 33
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_portal.c | 764
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_portal.h | 30
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_sys.h | 100
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_sys_decl.h | 4
-rw-r--r--  drivers/bus/fslmc/rte_bus_fslmc_version.map | 13
-rw-r--r--  drivers/bus/ifpga/Makefile | 2
-rw-r--r--  drivers/bus/ifpga/ifpga_bus.c | 33
-rw-r--r--  drivers/bus/ifpga/meson.build | 2
-rw-r--r--  drivers/bus/ifpga/rte_bus_ifpga.h | 3
-rw-r--r--  drivers/bus/pci/Makefile | 5
-rw-r--r--  drivers/bus/pci/bsd/pci.c | 6
-rw-r--r--  drivers/bus/pci/linux/Makefile | 2
-rw-r--r--  drivers/bus/pci/linux/pci.c | 46
-rw-r--r--  drivers/bus/pci/linux/pci_vfio.c | 268
-rw-r--r--  drivers/bus/pci/meson.build | 9
-rw-r--r--  drivers/bus/pci/pci_common.c | 143
-rw-r--r--  drivers/bus/pci/pci_common_uio.c | 33
-rw-r--r--  drivers/bus/pci/pci_params.c | 78
-rw-r--r--  drivers/bus/pci/private.h | 39
-rw-r--r--  drivers/bus/pci/rte_bus_pci.h | 10
-rw-r--r--  drivers/bus/vdev/Makefile | 5
-rw-r--r--  drivers/bus/vdev/meson.build | 7
-rw-r--r--  drivers/bus/vdev/vdev.c | 50
-rw-r--r--  drivers/bus/vdev/vdev_params.c | 66
-rw-r--r--  drivers/bus/vdev/vdev_private.h | 26
-rw-r--r--  drivers/bus/vmbus/Makefile | 2
-rw-r--r--  drivers/bus/vmbus/linux/vmbus_bus.c | 3
-rw-r--r--  drivers/bus/vmbus/meson.build | 2
-rw-r--r--  drivers/bus/vmbus/private.h | 6
-rw-r--r--  drivers/bus/vmbus/rte_bus_vmbus.h | 15
-rw-r--r--  drivers/bus/vmbus/rte_bus_vmbus_version.map | 7
-rw-r--r--  drivers/bus/vmbus/vmbus_channel.c | 26
-rw-r--r--  drivers/bus/vmbus/vmbus_common.c | 27
70 files changed, 2459 insertions, 445 deletions
diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index bffaa9d9..800e5cd2 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -14,7 +14,6 @@ CFLAGS := -I$(SRCDIR) $(CFLAGS)
CFLAGS += -O3 $(WERROR_FLAGS)
CFLAGS += -Wno-pointer-arith
CFLAGS += -Wno-cast-qual
-CFLAGS += -D _GNU_SOURCE
CFLAGS += -I$(RTE_BUS_DPAA)/
CFLAGS += -I$(RTE_BUS_DPAA)/include
CFLAGS += -I$(RTE_BUS_DPAA)/base/qbman
@@ -24,7 +23,7 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
# versioning export map
EXPORT_MAP := rte_bus_dpaa_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# all source are stored in SRCS-y
#
@@ -48,5 +47,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
LDLIBS += -lpthread
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c
index 031c6f1a..6b522420 100644
--- a/drivers/bus/dpaa/base/fman/netcfg_layer.c
+++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -21,7 +21,7 @@
/* This data structure contains all configuration information
 * related to the usage of DPA devices.
*/
-struct netcfg_info *netcfg;
+static struct netcfg_info *netcfg;
/* fd to open a socket for making ioctl request to disable/enable shared
* interfaces.
*/
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index b14b5905..750b756b 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -23,7 +23,7 @@ static void *bman_ccsr_map;
/* Portal driver */
/*****************/
-static __thread int fd = -1;
+static __thread int bmfd = -1;
static __thread struct bm_portal_config pcfg;
static __thread struct dpaa_ioctl_portal_map map = {
.type = dpaa_portal_bman
@@ -70,14 +70,14 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
pcfg.index = map.index;
bman_depletion_fill(&pcfg.mask);
- fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
- if (fd == -1) {
+ bmfd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (bmfd == -1) {
pr_err("BMan irq init failed");
process_portal_unmap(&map.addr);
return -EBUSY;
}
/* Use the IRQ FD as a unique IRQ number */
- pcfg.irq = fd;
+ pcfg.irq = bmfd;
portal = bman_create_affine_portal(&pcfg);
if (!portal) {
@@ -90,7 +90,7 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
/* Set the IRQ number */
irq_map.type = dpaa_portal_bman;
irq_map.portal_cinh = map.addr.cinh;
- process_portal_irq_map(fd, &irq_map);
+ process_portal_irq_map(bmfd, &irq_map);
return 0;
}
@@ -99,7 +99,7 @@ static int fsl_bman_portal_finish(void)
__maybe_unused const struct bm_portal_config *cfg;
int ret;
- process_portal_irq_unmap(fd);
+ process_portal_irq_unmap(bmfd);
cfg = bman_destroy_affine_portal();
DPAA_BUG_ON(cfg != &pcfg);
@@ -109,6 +109,11 @@ static int fsl_bman_portal_finish(void)
return ret;
}
+int bman_thread_fd(void)
+{
+ return bmfd;
+}
+
int bman_thread_init(void)
{
/* Convert from contiguous/virtual cpu numbering to real cpu when
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 7c17027f..dc64d089 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -852,11 +852,9 @@ mr_loop:
case QM_MR_VERB_FQPN:
/* Parked */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq = get_fq_table_entry(
- be32_to_cpu(msg->fq.contextB));
+ fq = get_fq_table_entry(msg->fq.contextB);
#else
- fq = (void *)(uintptr_t)
- be32_to_cpu(msg->fq.contextB);
+ fq = (void *)(uintptr_t)msg->fq.contextB;
#endif
fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs)
@@ -967,7 +965,6 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
*shadow = *dq;
dq = shadow;
shadow->fqid = be32_to_cpu(shadow->fqid);
- shadow->contextB = be32_to_cpu(shadow->contextB);
shadow->seqnum = be16_to_cpu(shadow->seqnum);
hw_fd_to_cpu(&shadow->fd);
#endif
@@ -1040,6 +1037,50 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
return limit;
}
+int qman_irqsource_add(u32 bits)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ bits = bits & QM_PIRQ_VISIBLE;
+
+ /* Clear any previously remaining interrupt conditions in
+ * QCSP_ISR. This prevents raising a false interrupt when
+ * interrupt conditions are enabled in QCSP_IER.
+ */
+ qm_isr_status_clear(&p->p, bits);
+ dpaa_set_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+
+
+ return 0;
+}
+
+int qman_irqsource_remove(u32 bits)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 ier;
+
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+
+ bits &= QM_PIRQ_VISIBLE;
+ dpaa_clear_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ ier = qm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_isr_status_clear(&p->p, ~ier);
+ return 0;
+}
+
u16 qman_affine_channel(int cpu)
{
if (cpu < 0) {
@@ -1092,9 +1133,9 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq = qman_fq_lookup_table[be32_to_cpu(dq[rx_number]->contextB)];
+ fq = qman_fq_lookup_table[dq[rx_number]->contextB];
#else
- fq = (void *)be32_to_cpu(dq[rx_number]->contextB);
+ fq = (void *)dq[rx_number]->contextB;
#endif
if (fq->cb.dqrr_prepare)
fq->cb.dqrr_prepare(shadow[rx_number],
@@ -1114,6 +1155,14 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
return rx_number;
}
+void qman_clear_irq(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ qm_isr_status_clear(&p->p, clear);
+}
+
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
void **bufs)
{
@@ -1143,7 +1192,6 @@ u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
*shadow = *dq;
dq = shadow;
shadow->fqid = be32_to_cpu(shadow->fqid);
- shadow->contextB = be32_to_cpu(shadow->contextB);
shadow->seqnum = be16_to_cpu(shadow->seqnum);
hw_fd_to_cpu(&shadow->fd);
#endif
@@ -1208,7 +1256,6 @@ struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
*shadow = *dq;
dq = shadow;
shadow->fqid = be32_to_cpu(shadow->fqid);
- shadow->contextB = be32_to_cpu(shadow->contextB);
shadow->seqnum = be16_to_cpu(shadow->seqnum);
hw_fd_to_cpu(&shadow->fd);
#endif
@@ -1504,7 +1551,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- mcc->initfq.fqd.context_b = fq->key;
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
#else
mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
#endif
@@ -2186,11 +2233,6 @@ int qman_enqueue_multi(struct qman_fq *fq,
/* try to send as many frames as possible */
while (eqcr->available && frames_to_send--) {
eq->fqid = fq->fqid_le;
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- eq->tag = cpu_to_be32(fq->key);
-#else
- eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
-#endif
eq->fd.opaque_addr = fd->opaque_addr;
eq->fd.addr = cpu_to_be40(fd->addr);
eq->fd.status = cpu_to_be32(fd->status);
diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c
index f6ecd6b2..ba153396 100644
--- a/drivers/bus/dpaa/base/qbman/qman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -113,6 +113,11 @@ static int fsl_qman_portal_finish(void)
return ret;
}
+int qman_thread_fd(void)
+{
+ return qmfd;
+}
+
int qman_thread_init(void)
{
/* Convert from contiguous/virtual cpu numbering to real cpu when
@@ -135,7 +140,7 @@ void qman_thread_irq(void)
* rather than breaking that encapsulation I am simply hard-coding the
* offset to the inhibit register here.
*/
- out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+ out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0x36C0, 0);
}
struct qman_portal *fsl_qman_portal_create(void)
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 16fabd1b..203f60dc 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -34,6 +34,7 @@
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
+#include <dpaax_iova_table.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -46,7 +47,7 @@ int dpaa_logtype_mempool;
int dpaa_logtype_pmd;
int dpaa_logtype_eventdev;
-struct rte_dpaa_bus rte_dpaa_bus;
+static struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;
/* define a variable to hold the portal_key, once created.*/
@@ -165,6 +166,8 @@ dpaa_create_device_list(void)
goto cleanup;
}
+ dev->device.bus = &rte_dpaa_bus.bus;
+
cfg = &dpaa_netcfg->port_cfg[i];
fman_intf = cfg->fman_if;
@@ -546,6 +549,9 @@ rte_dpaa_bus_probe(void)
fclose(svr_file);
}
+ /* And initialize the PA->VA translation table */
+ dpaax_iova_table_populate();
+
/* For each registered driver, and device, call the driver->probe */
TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
@@ -553,6 +559,9 @@ rte_dpaa_bus_probe(void)
if (ret)
continue;
+ if (rte_dev_is_probed(&dev->device))
+ continue;
+
if (!drv->probe ||
(dev->device.devargs &&
dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
@@ -563,8 +572,12 @@ rte_dpaa_bus_probe(void)
dev->device.devargs->policy ==
RTE_DEV_WHITELISTED)) {
ret = drv->probe(drv, dev);
- if (ret)
+ if (ret) {
DPAA_BUS_ERR("Unable to probe.\n");
+ } else {
+ dev->driver = drv;
+ dev->device.driver = &drv->driver;
+ }
}
break;
}
@@ -611,7 +624,7 @@ rte_dpaa_get_iommu_class(void)
return RTE_IOVA_PA;
}
-struct rte_dpaa_bus rte_dpaa_bus = {
+static struct rte_dpaa_bus rte_dpaa_bus = {
.bus = {
.scan = rte_dpaa_bus_scan,
.probe = rte_dpaa_bus_probe,
diff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h
index 92241d23..41226577 100644
--- a/drivers/bus/dpaa/include/compat.h
+++ b/drivers/bus/dpaa/include/compat.h
@@ -57,8 +57,9 @@
#ifndef __packed
#define __packed __rte_packed
#endif
+#ifndef noinline
#define noinline __attribute__((noinline))
-
+#endif
#define L1_CACHE_BYTES 64
#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
#define __stringify_1(x) #x
@@ -75,20 +76,25 @@
printf(fmt, ##args); \
fflush(stdout); \
} while (0)
-
+#ifndef pr_crit
#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#endif
+#ifndef pr_err
#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#endif
+#ifndef pr_warn
#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#endif
+#ifndef pr_info
#define pr_info(fmt, args...) prflush(fmt, ##args)
-
-#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
-#ifdef pr_debug
-#undef pr_debug
#endif
+#ifndef pr_debug
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define pr_debug(fmt, args...) printf(fmt, ##args)
#else
#define pr_debug(fmt, args...) {}
#endif
+#endif
#define DPAA_BUG_ON(x) RTE_ASSERT(x)
@@ -256,7 +262,9 @@ __bswap_24(uint32_t x)
#define be16_to_cpu(x) rte_be_to_cpu_16(x)
#define cpu_to_be64(x) rte_cpu_to_be_64(x)
+#if !defined(cpu_to_be32)
#define cpu_to_be32(x) rte_cpu_to_be_32(x)
+#endif
#define cpu_to_be16(x) rte_cpu_to_be_16(x)
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
diff --git a/drivers/bus/dpaa/include/fsl_fman_crc64.h b/drivers/bus/dpaa/include/fsl_fman_crc64.h
index bf162f3a..08ad6304 100644
--- a/drivers/bus/dpaa/include/fsl_fman_crc64.h
+++ b/drivers/bus/dpaa/include/fsl_fman_crc64.h
@@ -42,9 +42,9 @@ struct fman_crc64_t {
uint64_t initial;
uint64_t table[1 << 8];
};
-extern struct fman_crc64_t FMAN_CRC64_ECMA_182;
+extern struct fman_crc64_t fman_crc64_ecma_182;
#define DECLARE_FMAN_CRC64_TABLE() \
-struct fman_crc64_t FMAN_CRC64_ECMA_182 = { \
+struct fman_crc64_t fman_crc64_ecma_182 = { \
0xFFFFFFFFFFFFFFFFULL, \
{ \
0x0000000000000000ULL, 0xb32e4cbe03a75f6fULL, \
@@ -183,7 +183,7 @@ struct fman_crc64_t FMAN_CRC64_ECMA_182 = { \
*/
static inline uint64_t fman_crc64_init(void)
{
- return FMAN_CRC64_ECMA_182.initial;
+ return fman_crc64_ecma_182.initial;
}
/* Updates the CRC with arbitrary data */
@@ -192,7 +192,7 @@ static inline uint64_t fman_crc64_update(uint64_t crc,
{
uint8_t *p = data;
while (len--)
- crc = FMAN_CRC64_ECMA_182.table[(crc ^ *(p++)) & 0xff] ^
+ crc = fman_crc64_ecma_182.table[(crc ^ *(p++)) & 0xff] ^
(crc >> 8);
return crc;
}
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index b18cf037..e4384149 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1316,6 +1316,26 @@ u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
void **bufs);
/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_remove(u32 bits);
+
+/**
 * qman_affine_channel - return the channel ID of a portal
* @cpu: the cpu whose affine portal is the subject of the query
*
diff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h
index e1836175..ec1ab7ce 100644
--- a/drivers/bus/dpaa/include/fsl_usd.h
+++ b/drivers/bus/dpaa/include/fsl_usd.h
@@ -55,6 +55,10 @@ int qman_free_raw_portal(struct dpaa_raw_portal *portal);
int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
int bman_free_raw_portal(struct dpaa_raw_portal *portal);
+/* Obtain thread-local UIO file-descriptors */
+int qman_thread_fd(void);
+int bman_thread_fd(void);
+
/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
* line before notifying us, and this post-processing re-enables it once
* processing is complete. As such, it is essential to call this before going
@@ -63,6 +67,8 @@ int bman_free_raw_portal(struct dpaa_raw_portal *portal);
void qman_thread_irq(void);
void bman_thread_irq(void);
+void qman_clear_irq(void);
+
/* Global setup */
int qman_global_init(void);
int bman_global_init(void);
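
A hedged sketch of how these declarations fit together in an interrupt-mode service loop (assumes a thread that has already run qman_thread_init(); the poll(2) plumbing and batch size are illustrative):

    #include <poll.h>

    struct pollfd pfd = {
            .fd = qman_thread_fd(),   /* thread-local UIO fd */
            .events = POLLIN,
    };
    while (poll(&pfd, 1, -1) > 0) {
            qman_irqsource_remove(QM_PIRQ_DQRI); /* poll while draining */
            qman_poll_dqrr(16);                  /* process up to 16 entries */
            qman_clear_irq();                    /* drop stale status bits */
            qman_irqsource_add(QM_PIRQ_DQRI);    /* re-arm the source */
            qman_thread_irq();                   /* re-enable the IRQ line */
    }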
diff --git a/drivers/bus/dpaa/meson.build b/drivers/bus/dpaa/meson.build
index d10b62c0..1fcb4e91 100644
--- a/drivers/bus/dpaa/meson.build
+++ b/drivers/bus/dpaa/meson.build
@@ -1,11 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
-deps += ['eventdev']
+deps += ['common_dpaax', 'eventdev']
sources = files('base/fman/fman.c',
'base/fman/fman_hw.c',
'base/fman/netcfg_layer.c',
@@ -26,4 +28,3 @@ if cc.has_argument('-Wno-cast-qual')
endif
includes += include_directories('include', 'base/qbman')
-cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
index 7d6d6243..70076c7a 100644
--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -95,10 +95,24 @@ DPDK_18.02 {
DPDK_18.08 {
global:
-
fman_if_get_sg_enable;
fman_if_set_sg;
of_get_mac_address;
local: *;
} DPDK_18.02;
+
+DPDK_18.11 {
+ global:
+ bman_thread_irq;
+ fman_if_get_sg_enable;
+ fman_if_set_sg;
+ qman_clear_irq;
+
+ qman_irqsource_add;
+ qman_irqsource_remove;
+ qman_thread_fd;
+ qman_thread_irq;
+
+ local: *;
+} DPDK_18.08;
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 15dc6a4a..1d580a00 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -8,6 +8,7 @@
#include <rte_bus.h>
#include <rte_mempool.h>
+#include <dpaax_iova_table.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -110,6 +111,11 @@ extern struct dpaa_memseg_list rte_dpaa_memsegs;
static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
{
struct dpaa_memseg *ms;
+ void *va;
+
+ va = dpaax_iova_table_get_va(paddr);
+ if (likely(va != NULL))
+ return va;
/* Check if the address is already part of the memseg list internally
* maintained by the dpaa driver.
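
With the dpaax table populated at bus probe, the hot path above becomes an O(1) table lookup, and the memseg-list walk remains only as a fallback. A lifecycle sketch (names taken from this patch and the dpaax common library; fd_addr stands for a physical address read from a frame descriptor):

    dpaax_iova_table_populate();            /* once, in rte_dpaa_bus_probe() */
    void *va = rte_dpaa_mem_ptov(fd_addr);  /* datapath: table hit, else memseg walk */
    dpaax_iova_table_depopulate();          /* teardown */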
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index 515d0f53..218d9bd2 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -19,12 +19,13 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_common_dpaax
# versioning export map
EXPORT_MAP := rte_bus_fslmc_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
qbman/qbman_portal.c \
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index d2900edc..89af9385 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -20,6 +20,8 @@
#include <fslmc_vfio.h>
#include "fslmc_logs.h"
+#include <dpaax_iova_table.h>
+
int dpaa2_logtype_bus;
#define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups"
@@ -161,6 +163,8 @@ scan_one_fslmc_device(char *dev_name)
return -ENOMEM;
}
+ dev->device.bus = &rte_fslmc_bus.bus;
+
/* Parse the device name and ID */
t_ptr = strtok(dup_dev_name, ".");
if (!t_ptr) {
@@ -375,6 +379,19 @@ rte_fslmc_probe(void)
probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+ /* In case of PA, the FD addresses returned by qbman APIs are physical
+ * addresses, which need conversion into equivalent VA address for
+ * rte_mbuf. For that, a table (a serial array, in memory) is used to
+ * increase translation efficiency.
+ * This has to be done before probe, as some device initialization
+ * during probe allocates memory (dpaa2_sec) which needs to be pinned
+ * to this table.
+ *
+ * Errors are ignored, as relevant logs are emitted within dpaax and
+ * handling of an unavailable dpaax table is transparent to the caller.
+ */
+ dpaax_iova_table_populate();
+
TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
ret = rte_fslmc_match(drv, dev);
@@ -384,6 +401,9 @@ rte_fslmc_probe(void)
if (!drv->probe)
continue;
+ if (rte_dev_is_probed(&dev->device))
+ continue;
+
if (dev->device.devargs &&
dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
@@ -396,8 +416,12 @@ rte_fslmc_probe(void)
dev->device.devargs->policy ==
RTE_DEV_WHITELISTED)) {
ret = drv->probe(drv, dev);
- if (ret)
+ if (ret) {
DPAA2_BUS_ERR("Unable to probe");
+ } else {
+ dev->driver = drv;
+ dev->device.driver = &drv->driver;
+ }
}
break;
}
@@ -450,6 +474,11 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
fslmc_bus = driver->fslmc_bus;
+ /* Clean up the PA->VA translation table, from wherever this function
+ * is called.
+ */
+ dpaax_iova_table_depopulate();
+
TAILQ_REMOVE(&fslmc_bus->driver_list, driver, next);
/* Update Bus references */
driver->fslmc_bus = NULL;
@@ -490,6 +519,10 @@ rte_dpaa2_get_iommu_class(void)
if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
return RTE_IOVA_DC;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ return RTE_IOVA_PA;
+#endif
+
/* check if all devices on the bus support Virtual addressing or not */
has_iova_va = fslmc_all_device_support_iova();
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 4c2cd2a8..493b6e5b 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -221,6 +221,13 @@ fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
"alloc" : "dealloc",
va, virt_addr, iova_addr, map_len);
+ /* iova_addr may be set to RTE_BAD_IOVA */
+ if (iova_addr == RTE_BAD_IOVA) {
+ DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
+ cur_len += map_len;
+ continue;
+ }
+
if (type == RTE_MEM_EVENT_ALLOC)
ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
else
@@ -318,11 +325,15 @@ fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
static int
fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
- const struct rte_memseg *ms, void *arg)
+ const struct rte_memseg *ms, void *arg)
{
int *n_segs = arg;
int ret;
+ /* if IOVA address is invalid, skip */
+ if (ms->iova == RTE_BAD_IOVA)
+ return 0;
+
ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
if (ret)
DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
index 0215d22d..d9103409 100644
--- a/drivers/bus/fslmc/mc/dpbp.c
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -248,6 +248,16 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpbp_get_attributes - Retrieve DPBP attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
diff --git a/drivers/bus/fslmc/mc/dpci.c b/drivers/bus/fslmc/mc/dpci.c
index ff366bfa..95edae9d 100644
--- a/drivers/bus/fslmc/mc/dpci.c
+++ b/drivers/bus/fslmc/mc/dpci.c
@@ -265,6 +265,15 @@ int dpci_reset(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpci_get_attributes() - Retrieve DPCI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -292,6 +301,94 @@ int dpci_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
+/**
+ * dpci_get_peer_attributes() - Retrieve peer DPCI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @attr: Returned peer attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_peer_attr *attr)
+{
+ struct dpci_rsp_get_peer_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_PEER_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_peer_attr *)cmd.params;
+ attr->peer_id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+
+/**
+ * dpci_get_link_state() - Retrieve the DPCI link state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @up: Returned link state; returns '1' if link is up, '0' otherwise
+ *
+ * DPCI can be connected to another DPCI, together they
+ * create a 'link'. In order to use the DPCI Tx and Rx queues,
+ * both objects must be enabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *up)
+{
+ struct dpci_rsp_get_link_state *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_LINK_STATE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_link_state *)cmd.params;
+ *up = dpci_get_field(rsp_params->up, UP);
+
+ return 0;
+}
+
+/**
+ * dpci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation; use
+ * DPCI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -314,6 +411,9 @@ int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
dpci_set_field(cmd_params->dest_type,
DEST_TYPE,
cfg->dest_cfg.dest_type);
+ dpci_set_field(cmd_params->dest_type,
+ ORDER_PRESERVATION,
+ cfg->order_preservation_en);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -438,3 +538,100 @@ int dpci_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpci_set_opr() - Set Order Restoration configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @index: The queue index
+ * @options: Configuration mode options
+ * can be OPR_OPT_CREATE or OPR_OPT_RETIRE
+ * @cfg: Configuration options for the OPR
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg)
+{
+ struct dpci_cmd_set_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_set_opr *)cmd.params;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->oloe = cfg->oloe;
+ cmd_params->oeane = cfg->oeane;
+ cmd_params->olws = cfg->olws;
+ cmd_params->oa = cfg->oa;
+ cmd_params->oprrws = cfg->oprrws;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_get_opr() - Retrieve Order Restoration config and query.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @index: The queue index
+ * @cfg: Returned OPR configuration
+ * @qry: Returned OPR query
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry)
+{
+ struct dpci_rsp_get_opr *rsp_params;
+ struct dpci_cmd_get_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_get_opr *)cmd.params;
+ cmd_params->index = index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_opr *)cmd.params;
+ cfg->oloe = rsp_params->oloe;
+ cfg->oeane = rsp_params->oeane;
+ cfg->olws = rsp_params->olws;
+ cfg->oa = rsp_params->oa;
+ cfg->oprrws = rsp_params->oprrws;
+ qry->rip = dpci_get_field(rsp_params->flags, RIP);
+ qry->enable = dpci_get_field(rsp_params->flags, OPR_ENABLE);
+ qry->nesn = le16_to_cpu(rsp_params->nesn);
+ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
+ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
+ qry->tseq_nlis = dpci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
+ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
+ qry->hseq_nlis = dpci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
+ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
+ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
+ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
+ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
+
+ return 0;
+}
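
A hedged usage sketch for the new link-state query (mc_io and token are assumed to come from an earlier dpci_open(); CMD_PRI_LOW is the usual command-priority flag):

    int up = 0;

    err = dpci_get_link_state(mc_io, CMD_PRI_LOW, token, &up);
    if (!err && up) {
            /* both connected DPCIs are enabled; Tx/Rx queues are usable */
    }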
diff --git a/drivers/bus/fslmc/mc/dpcon.c b/drivers/bus/fslmc/mc/dpcon.c
index 3f6e04b9..92bd2651 100644
--- a/drivers/bus/fslmc/mc/dpcon.c
+++ b/drivers/bus/fslmc/mc/dpcon.c
@@ -296,6 +296,36 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
}
/**
+ * dpcon_set_notification() - Set DPCON notification destination
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @cfg: Notification parameters
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_notification_cfg *cfg)
+{
+ struct dpcon_cmd_set_notification *dpcon_cmd;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
+ cmd_flags,
+ token);
+ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
+ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
+ dpcon_cmd->priority = cfg->priority;
+ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpcon_get_api_version - Get Data Path Concentrator API version
* @mc_io: Pointer to MC portal's DPCON object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
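
Sketch of driving the new dpcon_set_notification() call (every identifier other than the API itself is a placeholder):

    struct dpcon_notification_cfg cfg = {
            .dpio_id  = dpio_id,        /* DPIO with a notification channel */
            .priority = 0,              /* 0-7 within that channel */
            .user_ctx = (uint64_t)(uintptr_t)my_ctx, /* echoed in each CDAN */
    };

    err = dpcon_set_notification(mc_io, CMD_PRI_LOW, dpcon_token, &cfg);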
diff --git a/drivers/bus/fslmc/mc/dpdmai.c b/drivers/bus/fslmc/mc/dpdmai.c
index 528889df..dcb9d516 100644
--- a/drivers/bus/fslmc/mc/dpdmai.c
+++ b/drivers/bus/fslmc/mc/dpdmai.c
@@ -113,6 +113,7 @@ int dpdmai_create(struct fsl_mc_io *mc_io,
cmd_flags,
dprc_token);
cmd_params = (struct dpdmai_cmd_create *)cmd.params;
+ cmd_params->num_queues = cfg->num_queues;
cmd_params->priorities[0] = cfg->priorities[0];
cmd_params->priorities[1] = cfg->priorities[1];
@@ -297,6 +298,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
rsp_params = (struct dpdmai_rsp_get_attr *)cmd.params;
attr->id = le32_to_cpu(rsp_params->id);
attr->num_of_priorities = rsp_params->num_of_priorities;
+ attr->num_of_queues = rsp_params->num_of_queues;
return 0;
}
@@ -306,6 +308,8 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
 * @queue_idx: Rx queue index. Accepted values are from 0 to num_queues
+ * parameter provided in dpdmai_create
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation; use
* DPDMAI_ALL_QUEUES to configure all Rx queues
@@ -317,6 +321,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
const struct dpdmai_rx_queue_cfg *cfg)
{
@@ -331,6 +336,7 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
cmd_params->dest_priority = cfg->dest_cfg.priority;
cmd_params->priority = priority;
+ cmd_params->queue_idx = queue_idx;
cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
cmd_params->options = cpu_to_le32(cfg->options);
dpdmai_set_field(cmd_params->dest_type,
@@ -346,6 +352,8 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
 * @queue_idx: Rx queue index. Accepted values are from 0 to num_queues
+ * parameter provided in dpdmai_create
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @attr: Returned Rx queue attributes
@@ -355,6 +363,7 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
struct dpdmai_rx_queue_attr *attr)
{
@@ -369,6 +378,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
cmd_params->priority = priority;
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -392,6 +402,8 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
 * @queue_idx: Tx queue index. Accepted values are from 0 to num_queues
+ * parameter provided in dpdmai_create
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @attr: Returned Tx queue attributes
@@ -401,6 +413,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
struct dpdmai_tx_queue_attr *attr)
{
@@ -415,6 +428,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
cmd_params->priority = priority;
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
index 966277cc..a3382ed1 100644
--- a/drivers/bus/fslmc/mc/dpio.c
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -268,6 +268,15 @@ int dpio_reset(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
int dpio_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
index 11183626..9d405b42 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -82,6 +82,7 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
/**
* BPSCN write will attempt to allocate into a cache (coherent write)
*/
+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
int dpbp_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
index 18402ced..55c9fc9b 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -9,13 +9,15 @@
/* DPBP Version */
#define DPBP_VER_MAJOR 3
-#define DPBP_VER_MINOR 3
+#define DPBP_VER_MINOR 4
/* Command versioning */
#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_VERSION_2 2
#define DPBP_CMD_ID_OFFSET 4
#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+#define DPBP_CMD_V2(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_VERSION_2)
/* Command IDs */
#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
@@ -37,8 +39,8 @@
#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016)
#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017)
-#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x1b0)
-#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x1b1)
+#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD_V2(0x1b0)
+#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD_V2(0x1b1)
#define DPBP_CMDID_GET_FREE_BUFFERS_NUM DPBP_CMD(0x1b2)
@@ -68,8 +70,8 @@ struct dpbp_cmd_set_notifications {
uint32_t depletion_exit;
uint32_t surplus_entry;
uint32_t surplus_exit;
- uint16_t options;
- uint16_t pad[3];
+ uint32_t options;
+ uint16_t pad[2];
uint64_t message_ctx;
uint64_t message_iova;
};
@@ -79,8 +81,8 @@ struct dpbp_rsp_get_notifications {
uint32_t depletion_exit;
uint32_t surplus_entry;
uint32_t surplus_exit;
- uint16_t options;
- uint16_t pad[3];
+ uint32_t options;
+ uint16_t pad[2];
uint64_t message_ctx;
uint64_t message_iova;
};
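
The widened 32-bit options field matches the new DPBP_NOTIF_OPT_COHERENT_WRITE flag in fsl_dpbp.h. Assuming the pre-existing dpbp_set_notifications() API (not shown in this hunk), a configuration sketch with illustrative thresholds:

    struct dpbp_notification_cfg cfg = {
            .depletion_entry = 32,       /* BPSCN when free buffers drop below 32 */
            .depletion_exit  = 64,       /* ... and again once they recover past 64 */
            .message_iova    = msg_iova, /* pre-mapped message buffer (assumed) */
            .options         = DPBP_NOTIF_OPT_COHERENT_WRITE,
    };

    err = dpbp_set_notifications(mc_io, CMD_PRI_LOW, token, &cfg);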
diff --git a/drivers/bus/fslmc/mc/fsl_dpci.h b/drivers/bus/fslmc/mc/fsl_dpci.h
index f69ed3f3..9af9097e 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -6,6 +6,8 @@
#ifndef __FSL_DPCI_H
#define __FSL_DPCI_H
+#include <fsl_dpopr.h>
+
/* Data Path Communication Interface API
* Contains initialization APIs and runtime control APIs for DPCI
*/
@@ -17,7 +19,7 @@ struct fsl_mc_io;
/**
* Maximum number of Tx/Rx priorities per DPCI object
*/
-#define DPCI_PRIO_NUM 2
+#define DPCI_PRIO_NUM 4
/**
* Indicates an invalid frame queue
@@ -107,6 +109,27 @@ int dpci_get_attributes(struct fsl_mc_io *mc_io,
struct dpci_attr *attr);
/**
+ * struct dpci_peer_attr - Structure representing the peer DPCI attributes
+ * @peer_id: DPCI peer id; if no peer is connected returns (-1)
+ * @num_of_priorities: The peer's number of receive priorities; determines the
+ * number of transmit priorities for the local DPCI object
+ */
+struct dpci_peer_attr {
+ int peer_id;
+ uint8_t num_of_priorities;
+};
+
+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_peer_attr *attr);
+
+int dpci_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *up);
+
+/**
* enum dpci_dest - DPCI destination types
* @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode
* and does not generate FQDAN notifications; user is
@@ -154,6 +177,11 @@ struct dpci_dest_cfg {
#define DPCI_QUEUE_OPT_DEST 0x00000002
/**
+ * Set the queue to hold active mode.
+ */
+#define DPCI_QUEUE_OPT_HOLD_ACTIVE 0x00000004
+
+/**
* struct dpci_rx_queue_cfg - Structure representing RX queue configuration
* @options: Flags representing the suggested modifications to the queue;
* Use any combination of 'DPCI_QUEUE_OPT_<X>' flags
@@ -163,11 +191,14 @@ struct dpci_dest_cfg {
* 'options'
* @dest_cfg: Queue destination parameters;
* valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options'
+ * @order_preservation_en: order preservation configuration for the rx queue
+ * valid only if 'DPCI_QUEUE_OPT_HOLD_ACTIVE' is contained in 'options'
*/
struct dpci_rx_queue_cfg {
uint32_t options;
uint64_t user_ctx;
struct dpci_dest_cfg dest_cfg;
+ int order_preservation_en;
};
int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
@@ -217,4 +248,18 @@ int dpci_get_api_version(struct fsl_mc_io *mc_io,
uint16_t *major_ver,
uint16_t *minor_ver);
+int dpci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg);
+
+int dpci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry);
+
#endif /* __FSL_DPCI_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
index 634248ac..92b85a82 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
@@ -8,7 +8,7 @@
/* DPCI Version */
#define DPCI_VER_MAJOR 3
-#define DPCI_VER_MINOR 3
+#define DPCI_VER_MINOR 4
#define DPCI_CMD_BASE_VERSION 1
#define DPCI_CMD_BASE_VERSION_V2 2
@@ -35,6 +35,8 @@
#define DPCI_CMDID_GET_PEER_ATTR DPCI_CMD_V1(0x0e2)
#define DPCI_CMDID_GET_RX_QUEUE DPCI_CMD_V1(0x0e3)
#define DPCI_CMDID_GET_TX_QUEUE DPCI_CMD_V1(0x0e4)
+#define DPCI_CMDID_SET_OPR DPCI_CMD_V1(0x0e5)
+#define DPCI_CMDID_GET_OPR DPCI_CMD_V1(0x0e6)
/* Macros for accessing command fields smaller than 1byte */
#define DPCI_MASK(field) \
@@ -90,6 +92,8 @@ struct dpci_rsp_get_link_state {
#define DPCI_DEST_TYPE_SHIFT 0
#define DPCI_DEST_TYPE_SIZE 4
+#define DPCI_ORDER_PRESERVATION_SHIFT 4
+#define DPCI_ORDER_PRESERVATION_SIZE 1
struct dpci_cmd_set_rx_queue {
uint32_t dest_id;
@@ -128,5 +132,61 @@ struct dpci_rsp_get_api_version {
uint16_t minor;
};
+struct dpci_cmd_set_opr {
+ uint16_t pad0;
+ uint8_t index;
+ uint8_t options;
+ uint8_t pad1[7];
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+};
+
+struct dpci_cmd_get_opr {
+ uint16_t pad;
+ uint8_t index;
+};
+
+#define DPCI_RIP_SHIFT 0
+#define DPCI_RIP_SIZE 1
+#define DPCI_OPR_ENABLE_SHIFT 1
+#define DPCI_OPR_ENABLE_SIZE 1
+#define DPCI_TSEQ_NLIS_SHIFT 0
+#define DPCI_TSEQ_NLIS_SIZE 1
+#define DPCI_HSEQ_NLIS_SHIFT 0
+#define DPCI_HSEQ_NLIS_SIZE 1
+
+struct dpci_rsp_get_opr {
+ uint64_t pad0;
+ /* from LSB: rip:1 enable:1 */
+ uint8_t flags;
+ uint16_t pad1;
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+ uint16_t nesn;
+ uint16_t pad8;
+ uint16_t ndsn;
+ uint16_t pad2;
+ uint16_t ea_tseq;
+ /* only the LSB */
+ uint8_t tseq_nlis;
+ uint8_t pad3;
+ uint16_t ea_hseq;
+ /* only the LSB */
+ uint8_t hseq_nlis;
+ uint8_t pad4;
+ uint16_t ea_hptr;
+ uint16_t pad5;
+ uint16_t ea_tptr;
+ uint16_t pad6;
+ uint16_t opr_vid;
+ uint16_t pad7;
+ uint16_t opr_id;
+};
#pragma pack(pop)
#endif /* _FSL_DPCI_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h
index 36dd5f3c..fc0430dc 100644
--- a/drivers/bus/fslmc/mc/fsl_dpcon.h
+++ b/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -81,6 +81,25 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
uint16_t token,
struct dpcon_attr *attr);
+/**
+ * struct dpcon_notification_cfg - Structure representing notification params
+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
+ * @priority: Priority selection within the DPIO channel; valid values
+ * are 0-7, depending on the number of priorities in that channel
+ * @user_ctx: User context value provided with each CDAN message
+ */
+struct dpcon_notification_cfg {
+ int dpio_id;
+ uint8_t priority;
+ uint64_t user_ctx;
+};
+
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_notification_cfg *cfg);
+
int dpcon_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai.h b/drivers/bus/fslmc/mc/fsl_dpdmai.h
index 03e46ec1..40469cc1 100644
--- a/drivers/bus/fslmc/mc/fsl_dpdmai.h
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai.h
@@ -39,6 +39,7 @@ int dpdmai_close(struct fsl_mc_io *mc_io,
* should be configured with 0
*/
struct dpdmai_cfg {
+ uint8_t num_queues;
uint8_t priorities[DPDMAI_PRIO_NUM];
};
@@ -78,6 +79,7 @@ int dpdmai_reset(struct fsl_mc_io *mc_io,
struct dpdmai_attr {
int id;
uint8_t num_of_priorities;
+ uint8_t num_of_queues;
};
int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
@@ -149,6 +151,7 @@ struct dpdmai_rx_queue_cfg {
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
const struct dpdmai_rx_queue_cfg *cfg);
@@ -168,6 +171,7 @@ struct dpdmai_rx_queue_attr {
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
struct dpdmai_rx_queue_attr *attr);
@@ -183,6 +187,7 @@ struct dpdmai_tx_queue_attr {
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
struct dpdmai_tx_queue_attr *attr);
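
With queue_idx added to the queue APIs, per-queue Rx setup becomes a simple loop. A sketch (DPDMAI_QUEUE_OPT_USER_CTX and DPDMAI_ALL_QUEUES are existing names in this header; the context cookie is illustrative):

    struct dpdmai_rx_queue_cfg rx_cfg = {
            .options  = DPDMAI_QUEUE_OPT_USER_CTX,
            .user_ctx = ctx_cookie,
    };
    uint8_t q;

    for (q = 0; q < attr.num_of_queues; q++)
            err = dpdmai_set_rx_queue(mc_io, CMD_PRI_LOW, token, q,
                                      DPDMAI_ALL_QUEUES, &rx_cfg);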
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
index 618e19ea..7e122de4 100644
--- a/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
@@ -7,30 +7,32 @@
/* DPDMAI Version */
#define DPDMAI_VER_MAJOR 3
-#define DPDMAI_VER_MINOR 2
+#define DPDMAI_VER_MINOR 3
/* Command versioning */
#define DPDMAI_CMD_BASE_VERSION 1
+#define DPDMAI_CMD_VERSION_2 2
#define DPDMAI_CMD_ID_OFFSET 4
#define DPDMAI_CMD(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
+#define DPDMAI_CMD_V2(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_VERSION_2)
/* Command IDs */
#define DPDMAI_CMDID_CLOSE DPDMAI_CMD(0x800)
#define DPDMAI_CMDID_OPEN DPDMAI_CMD(0x80E)
-#define DPDMAI_CMDID_CREATE DPDMAI_CMD(0x90E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMD_V2(0x90E)
#define DPDMAI_CMDID_DESTROY DPDMAI_CMD(0x98E)
#define DPDMAI_CMDID_GET_API_VERSION DPDMAI_CMD(0xa0E)
#define DPDMAI_CMDID_ENABLE DPDMAI_CMD(0x002)
#define DPDMAI_CMDID_DISABLE DPDMAI_CMD(0x003)
-#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD(0x004)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD_V2(0x004)
#define DPDMAI_CMDID_RESET DPDMAI_CMD(0x005)
#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMD(0x006)
-#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD(0x1A0)
-#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD(0x1A1)
-#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD(0x1A2)
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD_V2(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD_V2(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD_V2(0x1A2)
/* Macros for accessing command fields smaller than 1byte */
#define DPDMAI_MASK(field) \
@@ -47,7 +49,7 @@ struct dpdmai_cmd_open {
};
struct dpdmai_cmd_create {
- uint8_t pad;
+ uint8_t num_queues;
uint8_t priorities[2];
};
@@ -66,6 +68,7 @@ struct dpdmai_rsp_is_enabled {
struct dpdmai_rsp_get_attr {
uint32_t id;
uint8_t num_of_priorities;
+ uint8_t num_of_queues;
};
#define DPDMAI_DEST_TYPE_SHIFT 0
@@ -77,7 +80,7 @@ struct dpdmai_cmd_set_rx_queue {
uint8_t priority;
/* from LSB: dest_type:4 */
uint8_t dest_type;
- uint8_t pad;
+ uint8_t queue_idx;
uint64_t user_ctx;
uint32_t options;
};
@@ -85,6 +88,7 @@ struct dpdmai_cmd_set_rx_queue {
struct dpdmai_cmd_get_queue {
uint8_t pad[5];
uint8_t priority;
+ uint8_t queue_idx;
};
struct dpdmai_rsp_get_rx_queue {
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
index afaf9b71..8559bef8 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -18,7 +18,7 @@ struct fsl_mc_io;
* Management Complex firmware version information
*/
#define MC_VER_MAJOR 10
-#define MC_VER_MINOR 3
+#define MC_VER_MINOR 10
/**
* struct mc_version
diff --git a/drivers/bus/fslmc/mc/fsl_dpopr.h b/drivers/bus/fslmc/mc/fsl_dpopr.h
new file mode 100644
index 00000000..fd727e01
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpopr.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2018 NXP
+ *
+ */
+#ifndef __FSL_DPOPR_H_
+#define __FSL_DPOPR_H_
+
+/** @addtogroup dpopr Data Path Order Restoration API
+ * Contains initialization APIs and runtime APIs for the Order Restoration
+ * @{
+ */
+
+/** Order Restoration properties */
+
+/**
+ * Create a new Order Point Record option
+ */
+#define OPR_OPT_CREATE 0x1
+/**
+ * Retire an existing Order Point Record option
+ */
+#define OPR_OPT_RETIRE 0x2
+
+/**
+ * struct opr_cfg - Structure representing OPR configuration
+ * @oprrws: Order point record (OPR) restoration window size (0 to 5)
+ * 0 - Window size is 32 frames.
+ * 1 - Window size is 64 frames.
+ * 2 - Window size is 128 frames.
+ * 3 - Window size is 256 frames.
+ * 4 - Window size is 512 frames.
+ * 5 - Window size is 1024 frames.
+ * @oa: OPR auto advance NESN window size (0 disabled, 1 enabled)
+ * @olws: OPR acceptable late arrival window size (0 to 3)
+ * 0 - Disabled. Late arrivals are always rejected.
+ * 1 - Window size is 32 frames.
+ * 2 - Window size is the same as the OPR restoration
+ * window size configured in the OPRRWS field.
+ * 3 - Window size is 8192 frames.
+ * Late arrivals are always accepted.
+ * @oeane: Order restoration list (ORL) resource exhaustion
+ * advance NESN enable (0 disabled, 1 enabled)
+ * @oloe: OPR loose ordering enable (0 disabled, 1 enabled)
+ */
+struct opr_cfg {
+ uint8_t oprrws;
+ uint8_t oa;
+ uint8_t olws;
+ uint8_t oeane;
+ uint8_t oloe;
+};
+
+/**
+ * struct opr_qry - Structure representing an OPR query result
+ * @enable: Enabled state
+ * @rip: Retirement In Progress
+ * @ndsn: Next dispensed sequence number
+ * @nesn: Next expected sequence number
+ * @ea_hseq: Early arrival head sequence number
+ * @hseq_nlis: HSEQ not last in sequence
+ * @ea_tseq: Early arrival tail sequence number
+ * @tseq_nlis: TSEQ not last in sequence
+ * @ea_tptr: Early arrival tail pointer
+ * @ea_hptr: Early arrival head pointer
+ * @opr_id: Order Point Record ID
+ * @opr_vid: Order Point Record Virtual ID
+ */
+struct opr_qry {
+ char enable;
+ char rip;
+ uint16_t ndsn;
+ uint16_t nesn;
+ uint16_t ea_hseq;
+ char hseq_nlis;
+ uint16_t ea_tseq;
+ char tseq_nlis;
+ uint16_t ea_tptr;
+ uint16_t ea_hptr;
+ uint16_t opr_id;
+ uint16_t opr_vid;
+};
+
+#endif /* __FSL_DPOPR_H_ */
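
Tying the new header to the dpci_set_opr() call added earlier in this series, creating an order point record on Rx queue 0 looks roughly like this (field values are examples keyed to the documentation above):

    struct opr_cfg cfg = {
            .oprrws = 3,   /* 256-frame restoration window */
            .oa     = 0,   /* no NESN auto-advance */
            .olws   = 0,   /* reject late arrivals */
            .oeane  = 0,
            .oloe   = 0,   /* strict ordering */
    };

    err = dpci_set_opr(mc_io, CMD_PRI_LOW, token, 0 /* queue index */,
                       OPR_OPT_CREATE, &cfg);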
diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build
index 22a56a6f..4b052157 100644
--- a/drivers/bus/fslmc/meson.build
+++ b/drivers/bus/fslmc/meson.build
@@ -1,11 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
-deps += ['eventdev', 'kvargs']
+deps += ['common_dpaax', 'eventdev', 'kvargs']
sources = files('fslmc_bus.c',
'fslmc_vfio.c',
'mc/dpbp.c',
@@ -24,4 +26,3 @@ sources = files('fslmc_bus.c',
allow_experimental_apis = true
includes += include_directories('mc', 'qbman/include', 'portal')
-cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index 39c5adf9..db49d637 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -28,6 +28,13 @@
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
+/* List of all the memseg information locally maintained in the dpaa2 driver.
+ * This is to optimize PA-to-VA searches until a better mechanism (algorithm)
+ * is available.
+ */
+struct dpaa2_memseg_list rte_dpaa2_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+
TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev);
static struct dpbp_dev_list dpbp_dev_list
= TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 99f70be1..ce069984 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
*/
#include <unistd.h>
@@ -53,6 +53,11 @@ static uint32_t io_space_count;
/* Variable to store DPAA2 platform type */
uint32_t dpaa2_svr_family;
+/* Variable to store DPAA2 DQRR size */
+uint8_t dpaa2_dqrr_size;
+/* Variable to store DPAA2 EQCR size */
+uint8_t dpaa2_eqcr_size;
+
/*Stashing Macros default for LS208x*/
static int dpaa2_core_cluster_base = 0x04;
static int dpaa2_cluster_sz = 2;
@@ -125,7 +130,7 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
cpu_mask, token);
ret = system(command);
if (ret < 0)
- DPAA2_BUS_WARN(
+ DPAA2_BUS_DEBUG(
"Failed to affine interrupts on respective core");
else
DPAA2_BUS_DEBUG(" %s command is executed", command);
@@ -178,68 +183,6 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
#endif
static int
-configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
-{
- struct qbman_swp_desc p_des;
- struct dpio_attr attr;
-
- dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
- if (!dpio_dev->dpio) {
- DPAA2_BUS_ERR("Memory allocation failure");
- return -1;
- }
-
- dpio_dev->dpio->regs = dpio_dev->mc_portal;
- if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
- &dpio_dev->token)) {
- DPAA2_BUS_ERR("Failed to allocate IO space");
- free(dpio_dev->dpio);
- return -1;
- }
-
- if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
- DPAA2_BUS_ERR("Failed to reset dpio");
- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- free(dpio_dev->dpio);
- return -1;
- }
-
- if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
- DPAA2_BUS_ERR("Failed to Enable dpio");
- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- free(dpio_dev->dpio);
- return -1;
- }
-
- if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
- dpio_dev->token, &attr)) {
- DPAA2_BUS_ERR("DPIO Get attribute failed");
- dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- free(dpio_dev->dpio);
- return -1;
- }
-
- /* Configure & setup SW portal */
- p_des.block = NULL;
- p_des.idx = attr.qbman_portal_id;
- p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
- p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
- p_des.irq = -1;
- p_des.qman_version = attr.qbman_version;
-
- dpio_dev->sw_portal = qbman_swp_init(&p_des);
- if (dpio_dev->sw_portal == NULL) {
- DPAA2_BUS_ERR("QBMan SW Portal Init failed");
- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- free(dpio_dev->dpio);
- return -1;
- }
-
- return 0;
-}
-
-static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
{
int sdest, ret;
@@ -402,15 +345,17 @@ dpaa2_create_dpio_device(int vdev_fd,
struct vfio_device_info *obj_info,
int object_id)
{
- struct dpaa2_dpio_dev *dpio_dev;
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
+ struct qbman_swp_desc p_des;
+ struct dpio_attr attr;
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
return -1;
}
- dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
+ dpio_dev = rte_zmalloc(NULL, sizeof(struct dpaa2_dpio_dev),
RTE_CACHE_LINE_SIZE);
if (!dpio_dev) {
DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
@@ -423,45 +368,33 @@ dpaa2_create_dpio_device(int vdev_fd,
/* Using single portal for all devices */
dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
- reg_info.index = 0;
- if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
- DPAA2_BUS_ERR("vfio: error getting region info");
- rte_free(dpio_dev);
- return -1;
+ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
+ if (!dpio_dev->dpio) {
+ DPAA2_BUS_ERR("Memory allocation failure");
+ goto err;
}
- dpio_dev->ce_size = reg_info.size;
- dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
- PROT_WRITE | PROT_READ, MAP_SHARED,
- vdev_fd, reg_info.offset);
-
- reg_info.index = 1;
- if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
- DPAA2_BUS_ERR("vfio: error getting region info");
- rte_free(dpio_dev);
- return -1;
+ dpio_dev->dpio->regs = dpio_dev->mc_portal;
+ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
+ &dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to allocate IO space");
+ goto err;
}
- dpio_dev->ci_size = reg_info.size;
- dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
- PROT_WRITE | PROT_READ, MAP_SHARED,
- vdev_fd, reg_info.offset);
-
- if (configure_dpio_qbman_swp(dpio_dev)) {
- DPAA2_BUS_ERR(
- "Fail to configure the dpio qbman portal for %d",
- dpio_dev->hw_id);
- rte_free(dpio_dev);
- return -1;
+ if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to reset dpio");
+ goto err;
}
- io_space_count++;
- dpio_dev->index = io_space_count;
+ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to enable dpio");
+ goto err;
+ }
- if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
- DPAA2_BUS_ERR("Fail to setup interrupt for %d",
- dpio_dev->hw_id);
- rte_free(dpio_dev);
+ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
+ dpio_dev->token, &attr)) {
+ DPAA2_BUS_ERR("DPIO Get attribute failed");
+ goto err;
}
/* find the SoC type for the first time */
@@ -481,11 +414,77 @@ dpaa2_create_dpio_device(int vdev_fd,
DPAA2_BUS_DEBUG("LX2160 Platform Detected");
}
dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
+
+ if (dpaa2_svr_family == SVR_LX2160A) {
+ dpaa2_dqrr_size = DPAA2_LX2_DQRR_RING_SIZE;
+ dpaa2_eqcr_size = DPAA2_LX2_EQCR_RING_SIZE;
+ } else {
+ dpaa2_dqrr_size = DPAA2_DQRR_RING_SIZE;
+ dpaa2_eqcr_size = DPAA2_EQCR_RING_SIZE;
+ }
+ }
+
+ if (dpaa2_svr_family == SVR_LX2160A)
+ reg_info.index = DPAA2_SWP_CENA_MEM_REGION;
+ else
+ reg_info.index = DPAA2_SWP_CENA_REGION;
+
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ DPAA2_BUS_ERR("vfio: error getting region info");
+ goto err;
+ }
+
+ dpio_dev->ce_size = reg_info.size;
+ dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ vdev_fd, reg_info.offset);
+
+ reg_info.index = DPAA2_SWP_CINH_REGION;
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ DPAA2_BUS_ERR("vfio: error getting region info");
+ goto err;
+ }
+
+ dpio_dev->ci_size = reg_info.size;
+ dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ vdev_fd, reg_info.offset);
+
+ /* Configure & setup SW portal */
+ p_des.block = NULL;
+ p_des.idx = attr.qbman_portal_id;
+ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
+ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
+ p_des.irq = -1;
+ p_des.qman_version = attr.qbman_version;
+
+ dpio_dev->sw_portal = qbman_swp_init(&p_des);
+ if (dpio_dev->sw_portal == NULL) {
+ DPAA2_BUS_ERR("QBMan SW Portal Init failed");
+ goto err;
+ }
+
+ io_space_count++;
+ dpio_dev->index = io_space_count;
+
+ if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
+ DPAA2_BUS_ERR("Failed to set up interrupt for %d",
+ dpio_dev->hw_id);
+ goto err;
}
TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
return 0;
+
+err:
+ if (dpio_dev->dpio) {
+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ }
+ rte_free(dpio_dev);
+ return -1;
}
void
@@ -506,7 +505,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
q_storage->dq_storage[i] = rte_malloc(NULL,
- DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
+ dpaa2_dqrr_size * sizeof(struct qbman_result),
RTE_CACHE_LINE_SIZE);
if (!q_storage->dq_storage[i])
goto fail;
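
The net effect of the dpaa2_hw_dpio.c hunks is a single-pass setup path: the QBMan portal is configured inline, ring geometry is chosen once from the SVR, and all failures funnel through one err: label. A minimal sketch of the SVR-keyed selection follows; the SVR constant is the one this patch uses elsewhere, shown here only for illustration, and the DPAA2_* ring macros come from dpaa2_hw_pvt.h below.

#include <stdint.h>

#define SVR_LX2160A		0x87360000	/* illustrative; as used in this patch */

uint32_t dpaa2_svr_family;
uint8_t dpaa2_dqrr_size;
uint8_t dpaa2_eqcr_size;

/* Pick the ring geometry once, the first time the SoC type is read. */
static void
dpaa2_select_ring_sizes(uint32_t svr)
{
	dpaa2_svr_family = svr & 0xffff0000;
	if (dpaa2_svr_family == SVR_LX2160A) {
		dpaa2_dqrr_size = DPAA2_LX2_DQRR_RING_SIZE;	/* 32 */
		dpaa2_eqcr_size = DPAA2_LX2_EQCR_RING_SIZE;	/* 32 */
	} else {
		dpaa2_dqrr_size = DPAA2_DQRR_RING_SIZE;		/* 16 */
		dpaa2_eqcr_size = DPAA2_EQCR_RING_SIZE;		/* 8 */
	}
}
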
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index d593eea7..462501a2 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -30,6 +30,10 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
/* Variable to store DPAA2 platform type */
extern uint32_t dpaa2_svr_family;
+/* Variable to store DPAA2 DQRR size */
+extern uint8_t dpaa2_dqrr_size;
+/* Variable to store DPAA2 EQCR size */
+extern uint8_t dpaa2_eqcr_size;
extern struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 82075936..efbeebef 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
*/
@@ -9,6 +9,7 @@
#define _DPAA2_HW_PVT_H_
#include <rte_eventdev.h>
+#include <dpaax_iova_table.h>
#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>
@@ -31,11 +32,27 @@
#define VLAN_TAG_SIZE 4 /** < Vlan Header Length */
#endif
-#define MAX_TX_RING_SLOTS 8
- /** <Maximum number of slots available in TX ring*/
+/* Maximum number of slots available in TX ring */
+#define MAX_TX_RING_SLOTS 32
-#define DPAA2_DQRR_RING_SIZE 16
- /** <Maximum number of slots available in RX ring*/
+/* Maximum number of slots available in EQCR (enqueue) ring */
+#define DPAA2_EQCR_RING_SIZE 8
+/* Maximum number of slots available in EQCR ring on LX2 */
+#define DPAA2_LX2_EQCR_RING_SIZE 32
+
+/* Maximum number of slots available in RX ring */
+#define DPAA2_DQRR_RING_SIZE 16
+/* Maximum number of slots available in RX ring on LX2 */
+#define DPAA2_LX2_DQRR_RING_SIZE 32
+
+/* EQCR shift to get EQCR size: (1 << 3) = 8 for LS2 */
+#define DPAA2_EQCR_SHIFT 3
+/* EQCR shift to get EQCR size on LX2: (1 << 5) = 32 */
+#define DPAA2_LX2_EQCR_SHIFT 5
+
+#define DPAA2_SWP_CENA_REGION 0
+#define DPAA2_SWP_CINH_REGION 1
+#define DPAA2_SWP_CENA_MEM_REGION 2
#define MC_PORTAL_INDEX 0
#define NUM_DPIO_REGIONS 2
@@ -193,6 +210,12 @@ enum qbman_fd_format {
#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)
#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
+
+#define DPAA2_RESET_FD_FLC(fd) do { \
+ (fd)->simple.flc_lo = 0; \
+ (fd)->simple.flc_hi = 0; \
+} while (0)
+
#define DPAA2_SET_FD_FLC(fd, addr) do { \
(fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \
(fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
@@ -275,28 +298,26 @@ extern struct dpaa2_memseg_list rte_dpaa2_memsegs;
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
extern uint8_t dpaa2_virt_mode;
static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
-/* todo - this is costly, need to write a fast coversion routine */
+
static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
- struct dpaa2_memseg *ms;
+ void *va;
if (dpaa2_virt_mode)
return (void *)(size_t)paddr;
- /* Check if the address is already part of the memseg list internally
- * maintained by the dpaa2 driver.
- */
- TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
- if (paddr >= ms->iova && paddr <
- ms->iova + ms->len)
- return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
- }
+ va = (void *)dpaax_iova_table_get_va(paddr);
+ if (likely(va != NULL))
+ return va;
/* If not, Fallback to full memseg list searching */
- return rte_mem_iova2virt(paddr);
+ va = rte_mem_iova2virt(paddr);
+
+ return va;
}
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
+
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
const struct rte_memseg *memseg;
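
The rewritten dpaa2_mem_ptov() replaces the driver-local memseg list walk with a two-level lookup: first the shared dpaax IOVA table, then the generic EAL search only on a miss. A reduced restatement of the call pattern (no new API, just the shape of the fast/slow path):

/* Fast path: O(1) dpaax table lookup; slow path: full memseg search. */
static inline void *
dpaa2_pa_to_va(phys_addr_t paddr)
{
	void *va = (void *)dpaax_iova_table_get_va(paddr);

	if (likely(va != NULL))
		return va;			/* table hit */

	return rte_mem_iova2virt(paddr);	/* fallback: memseg walk */
}
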
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index 7be8f54c..655bff4b 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -78,13 +78,14 @@ do { \
#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
-
#define __iomem
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+#define dma_wmb() rte_smp_mb()
+
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)
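
dma_wmb() maps to rte_smp_mb() and is the ordering primitive the portal code relies on: every store that fills a command or EQCR entry must be visible before the store of the verb byte that hands the entry to hardware. A sketch of that publish idiom, assuming the compat definitions above (publish_cmd is a hypothetical helper name):

#include <string.h>

/* Fill the entry body first, fence, then write the verb byte: the
 * valid bit in the verb word is what makes the entry visible to QBMan. */
static inline void
publish_cmd(uint32_t *entry, const uint32_t *cl, uint32_t valid_bit)
{
	memcpy(&entry[1], &cl[1], 28);	/* body: words 1..7 */
	dma_wmb();			/* order body before verb */
	entry[0] = cl[0] | valid_bit;	/* verb word publishes the entry */
}
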
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 3e63db3a..10c72e04 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -43,6 +43,15 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
/**
+ * qbman_swp_invalidate() - Invalidate the cache-enabled area of the QBMan
+ * portal. This must be called if a portal is moved to another core,
+ * because the QBMan portal area is non-coherent
+ * @p: the qbman_swp object to be invalidated
+ *
+ */
+void qbman_swp_invalidate(struct qbman_swp *p);
+
+/**
* qbman_swp_get_desc() - Get the descriptor of the given portal object.
* @p: the given portal object.
*
@@ -172,7 +181,7 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
/**
* struct qbman_result - structure for qbman dequeue response and/or
* notification.
- * @donot_manipulate_directly: the 16 32bit data to represent the whole
+ * @dont_manipulate_directly: the 16 32bit data to represent the whole
* possible qbman dequeue result.
*/
struct qbman_result {
@@ -262,7 +271,7 @@ void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
*/
struct qbman_pull_desc {
union {
- uint32_t donot_manipulate_directly[16];
+ uint32_t dont_manipulate_directly[16];
struct pull {
uint8_t verb;
uint8_t numf;
@@ -356,6 +365,14 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
enum qbman_pull_type_e dct);
/**
+ * qbman_pull_desc_set_rad() - Decide whether to reschedule the FQ after dequeue
+ *
+ * @d: the pull dequeue descriptor.
+ * @rad: 1 = Reschedule the FQ after dequeue.
+ * 0 = Allow the FQ to remain active after dequeue.
+ */
+void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);
+
+/**
* qbman_swp_pull() - Issue the pull dequeue command
* @s: the software portal object.
* @d: the software portal descriptor which has been configured with
@@ -775,7 +792,7 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
union {
- uint32_t donot_manipulate_directly[8];
+ uint32_t dont_manipulate_directly[8];
struct eq {
uint8_t verb;
uint8_t dca;
@@ -796,11 +813,11 @@ struct qbman_eq_desc {
/**
* struct qbman_eq_response - structure of enqueue response
- * @donot_manipulate_directly: the 16 32bit data to represent the whole
+ * @dont_manipulate_directly: the 16 32bit data to represent the whole
* enqueue response.
*/
struct qbman_eq_response {
- uint32_t donot_manipulate_directly[16];
+ uint32_t dont_manipulate_directly[16];
};
/**
@@ -958,6 +975,7 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
* @s: the software portal used for enqueue.
* @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
+ * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
* @num_frames: the number of the frames to be enqueued.
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
@@ -973,7 +991,6 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
* @s: the software portal used for enqueue.
* @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
- * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
* @num_frames: the number of the frames to be enqueued.
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
@@ -998,12 +1015,12 @@ int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
/*******************/
/**
* struct qbman_release_desc - The structure for buffer release descriptor
- * @donot_manipulate_directly: the 32bit data to represent the whole
+ * @dont_manipulate_directly: the 32bit data to represent the whole
* possible settings of qbman release descriptor.
*/
struct qbman_release_desc {
union {
- uint32_t donot_manipulate_directly[16];
+ uint32_t dont_manipulate_directly[16];
struct br {
uint8_t verb;
uint8_t reserved;
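
Taken together, the extended pull-descriptor API composes as below. Note that qbman_pull_desc_set_rad() is only honoured once a response storage has been set (RLS = 1), so the call order matters. A hedged usage sketch built from the declarations in this header, not code from the patch itself:

/* Issue a volatile dequeue that reschedules the FQ afterwards. */
static int
pull_with_rad(struct qbman_swp *swp, uint32_t fqid,
	      struct qbman_result *storage, uint64_t storage_phys)
{
	struct qbman_pull_desc pd;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_numframes(&pd, 8);
	qbman_pull_desc_set_fq(&pd, fqid);
	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
	qbman_pull_desc_set_rad(&pd, 1);	/* valid only after RLS = 1 */

	return qbman_swp_pull(swp, &pd);	/* -EBUSY if VDQCR is busy */
}
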
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 07145005..3380e54f 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -1,39 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
*/
+#include "qbman_sys.h"
#include "qbman_portal.h"
/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE 0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46
-/* CINH register offsets */
-#define QBMAN_CINH_SWP_EQCR_PI 0x800
-#define QBMAN_CINH_SWP_EQCR_CI 0x840
-#define QBMAN_CINH_SWP_EQAR 0x8c0
-#define QBMAN_CINH_SWP_DQPI 0xa00
-#define QBMAN_CINH_SWP_DCAP 0xac0
-#define QBMAN_CINH_SWP_SDQCR 0xb00
-#define QBMAN_CINH_SWP_RAR 0xcc0
-#define QBMAN_CINH_SWP_ISR 0xe00
-#define QBMAN_CINH_SWP_IER 0xe40
-#define QBMAN_CINH_SWP_ISDR 0xe80
-#define QBMAN_CINH_SWP_IIR 0xec0
-#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
-#define QBMAN_CINH_SWP_ITPR 0xf40
-
-/* CENA register offsets */
-#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
-#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
-#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
-#define QBMAN_CENA_SWP_CR 0x600
-#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
-#define QBMAN_CENA_SWP_VDQCR 0x780
-#define QBMAN_CENA_SWP_EQCR_CI 0x840
-
/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
@@ -83,6 +61,102 @@ enum qbman_sdqcr_fc {
#define MAX_QBMAN_PORTALS 64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
+/* Internal Function declaration */
+static int
+qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+static int
+qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+
+static int
+qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+static int
+qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+
+static int
+qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
+
+static int
+qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
+static int
+qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
+
+const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int
+qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers);
+static int
+qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers);
+
+/* Function pointers */
+static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+ = qbman_swp_enqueue_array_mode_direct;
+
+static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+ = qbman_swp_enqueue_ring_mode_direct;
+
+static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_direct;
+
+static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_desc_direct;
+
+static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+ = qbman_swp_pull_direct;
+
+const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+ = qbman_swp_dqrr_next_direct;
+
+static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers)
+ = qbman_swp_release_direct;
+
/*********************************/
/* Portal constructor/destructor */
/*********************************/
@@ -104,25 +178,30 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
int ret;
uint32_t eqcr_pi;
+ uint32_t mask_size;
struct qbman_swp *p = malloc(sizeof(*p));
if (!p)
return NULL;
+
+ memset(p, 0, sizeof(struct qbman_swp));
+
p->desc = *d;
#ifdef QBMAN_CHECKING
p->mc.check = swp_mc_can_start;
#endif
p->mc.valid_bit = QB_VALID_BIT;
- p->sdq = 0;
p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+ if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ p->mr.valid_bit = QB_VALID_BIT;
atomic_set(&p->vdq.busy, 1);
p->vdq.valid_bit = QB_VALID_BIT;
- p->dqrr.next_idx = 0;
p->dqrr.valid_bit = QB_VALID_BIT;
- if ((p->desc.qman_version & 0xFFFF0000) < QMAN_REV_4100) {
+ qman_version = p->desc.qman_version;
+ if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
p->dqrr.dqrr_size = 4;
p->dqrr.reset_bug = 1;
} else {
@@ -136,18 +215,54 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
pr_err("qbman_swp_sys_init() failed %d\n", ret);
return NULL;
}
+
+ /* Verify that the DQRR PI is 0 - if it is not, the portal is not
+ * in its default state, which is an error
+ */
+ if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
+ pr_err("qbman DQRR PI is not zero, portal is not clean\n");
+ free(p);
+ return NULL;
+ }
+
/* SDQCR needs to be initialized to 0 when no channels are
* being dequeued from or else the QMan HW will indicate an
* error. The values that were calculated above will be
* applied when dequeues from a specific channel are enabled.
*/
qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
+
+ p->eqcr.pi_ring_size = 8;
+ if ((qman_version & 0xFFFF0000) >= QMAN_REV_5000) {
+ p->eqcr.pi_ring_size = 32;
+ qbman_swp_enqueue_array_mode_ptr =
+ qbman_swp_enqueue_array_mode_mem_back;
+ qbman_swp_enqueue_ring_mode_ptr =
+ qbman_swp_enqueue_ring_mode_mem_back;
+ qbman_swp_enqueue_multiple_ptr =
+ qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_desc_ptr =
+ qbman_swp_enqueue_multiple_desc_mem_back;
+ qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+ qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+ qbman_swp_release_ptr = qbman_swp_release_mem_back;
+ }
+
+ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+ p->eqcr.pi_mask = (p->eqcr.pi_mask<<1) + 1;
eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
- p->eqcr.pi = eqcr_pi & 0xF;
+ p->eqcr.pi = eqcr_pi & p->eqcr.pi_mask;
p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
- p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
- p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
- p->eqcr.ci, p->eqcr.pi);
+ if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p->eqcr.ci = qbman_cinh_read(&p->sys,
+ QBMAN_CINH_SWP_EQCR_CI) & p->eqcr.pi_mask;
+ else
+ p->eqcr.ci = qbman_cinh_read(&p->sys,
+ QBMAN_CINH_SWP_EQCR_PI) & p->eqcr.pi_mask;
+ p->eqcr.available = p->eqcr.pi_ring_size -
+ qm_cyc_diff(p->eqcr.pi_ring_size,
+ p->eqcr.ci & (p->eqcr.pi_mask<<1),
+ p->eqcr.pi & (p->eqcr.pi_mask<<1));
portal_idx_map[p->desc.idx] = p;
return p;
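
The block of *_direct/*_mem_back pairs is wired up through file-scope function pointers: qbman_swp_init() inspects the QMan revision once and, for rev 5000+ (memory-backed portals), repoints every pointer at the mem_back variant, while the exported entry points stay thin inline wrappers. A stripped-down sketch of the pattern (names here are illustrative, not the driver's):

typedef int (*enqueue_fn)(struct qbman_swp *s,
			  const struct qbman_eq_desc *d,
			  const struct qbman_fd *fd);

static int enqueue_direct(struct qbman_swp *, const struct qbman_eq_desc *,
			  const struct qbman_fd *);
static int enqueue_mem_back(struct qbman_swp *, const struct qbman_eq_desc *,
			    const struct qbman_fd *);

/* Default to the legacy portal; switch once at init, never on the fast path. */
static enqueue_fn enqueue_ptr = enqueue_direct;

static void
select_portal_ops(uint32_t qman_version)
{
	if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		enqueue_ptr = enqueue_mem_back;
}

static inline int
enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
	const struct qbman_fd *fd)
{
	return enqueue_ptr(s, d, fd);	/* call sites stay unchanged */
}
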
@@ -229,7 +344,8 @@ int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
- qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
+ inhibit ? 0xffffffff : 0);
}
/***********************/
@@ -246,7 +362,10 @@ void *qbman_swp_mc_start(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
- ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
+ if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
+ else
+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
#ifdef QBMAN_CHECKING
if (!ret)
p->mc.check = swp_mc_can_submit;
@@ -266,8 +385,17 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
* caller wants to OR but has forgotten to do so.
*/
QBMAN_BUG_ON((*v & cmd_verb) != *v);
- *v = cmd_verb | p->mc.valid_bit;
- qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+ if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ dma_wmb();
+ *v = cmd_verb | p->mc.valid_bit;
+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+ clean(cmd);
+ } else {
+ *v = cmd_verb | p->mr.valid_bit;
+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
+ dma_wmb();
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
+ }
#ifdef QBMAN_CHECKING
p->mc.check = swp_mc_can_poll;
#endif
@@ -279,17 +407,34 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
- qbman_cena_invalidate_prefetch(&p->sys,
- QBMAN_CENA_SWP_RR(p->mc.valid_bit));
- ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
- /* Remove the valid-bit - command completed if the rest is non-zero */
- verb = ret[0] & ~QB_VALID_BIT;
- if (!verb)
- return NULL;
+ if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ qbman_cena_invalidate_prefetch(&p->sys,
+ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ ret = qbman_cena_read(&p->sys,
+ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit -
+ * command completed iff the rest is non-zero
+ */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ } else {
+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
+ /* Command completed if the valid bit is toggled */
+ if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
+ return NULL;
+ /* Remove the valid-bit -
+ * command completed iff the rest is non-zero
+ */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mr.valid_bit ^= QB_VALID_BIT;
+ }
#ifdef QBMAN_CHECKING
p->mc.check = swp_mc_can_start;
#endif
- p->mc.valid_bit ^= QB_VALID_BIT;
return ret;
}
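
The submit/result split implements a valid-bit handshake: pre-5000, software toggles mc.valid_bit per command and a response is complete once its non-valid bits are non-zero; on memory-backed portals, completion is detected by the response's valid bit flipping to match mr.valid_bit after the read-trigger write to CR_RT. Callers see neither detail; they use the poll loop from qbman_portal.h, which is essentially:

static void *
mc_complete(struct qbman_swp *swp, void *cmd, uint8_t cmd_verb)
{
	void *ret;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);
	do {
		ret = qbman_swp_mc_result(swp);	/* NULL until HW completes */
	} while (!ret);

	return ret;
}
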
@@ -417,13 +562,26 @@ void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
}
}
-#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_IDX(eqar) ((eqar) & 0x1f)
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
-static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd)
+static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
+ uint8_t idx)
+{
+ if (idx < 16)
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
+ QMAN_RT_MODE);
+ else
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
+ (idx - 16) * 4,
+ QMAN_RT_MODE);
+}
+
+
+static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
@@ -433,39 +591,69 @@ static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
if (!EQAR_SUCCESS(eqar))
return -EBUSY;
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], fd, sizeof(*fd));
+
/* Set the verb byte, have to substitute in the valid-bit */
- lwsync();
+ dma_wmb();
p[0] = cl[0] | EQAR_VB(eqar);
qbman_cena_write_complete_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
return 0;
}
+static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
-static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd)
+ pr_debug("EQAR=%08x\n", eqar);
+ if (!EQAR_SUCCESS(eqar))
+ return -EBUSY;
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | EQAR_VB(eqar);
+ dma_wmb();
+ qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
+ return 0;
+}
+
+static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
+}
+
+static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
- uint32_t eqcr_ci;
- uint8_t diff;
+ uint32_t eqcr_ci, full_mask, half_mask;
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
s->eqcr.ci = qbman_cena_read_reg(&s->sys,
- QBMAN_CENA_SWP_EQCR_CI) & 0xF;
- diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
- eqcr_ci, s->eqcr.ci);
- s->eqcr.available += diff;
- if (!diff)
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
return -EBUSY;
}
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], fd, sizeof(*fd));
lwsync();
@@ -473,16 +661,61 @@ static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
/* Set the verb byte, have to substitute in the valid-bit */
p[0] = cl[0] | s->eqcr.pi_vb;
qbman_cena_write_complete_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
s->eqcr.pi++;
- s->eqcr.pi &= 0xF;
+ s->eqcr.pi &= full_mask;
s->eqcr.available--;
- if (!(s->eqcr.pi & 7))
+ if (!(s->eqcr.pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
return 0;
}
+static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, full_mask, half_mask;
+
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return -EBUSY;
+ }
+
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ s->eqcr.pi++;
+ s->eqcr.pi &= full_mask;
+ s->eqcr.available--;
+ if (!(s->eqcr.pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ return 0;
+}
+
+static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
+}
+
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
const struct qbman_fd *fd)
{
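
All the ring-mode paths share one piece of index arithmetic: pi and ci run over two laps of the ring (mask = 2*size - 1), so a full ring is distinguishable from an empty one without a separate count, and qm_cyc_diff() measures the occupied distance. The free-slot computation, reduced to a sketch (ring_size a power of two; eqcr_free_slots is an illustrative name):

/* Indices span 0 .. 2*ring_size-1; the extra lap disambiguates a full
 * ring from an empty one when pi == ci modulo ring_size. */
static inline uint32_t
eqcr_free_slots(uint32_t ring_size, uint32_t ci, uint32_t pi)
{
	uint32_t used = (pi - ci) & (2 * ring_size - 1);

	return ring_size - used;	/* complement of qm_cyc_diff() */
}
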
@@ -492,27 +725,27 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
return qbman_swp_enqueue_ring_mode(s, d, fd);
}
-int qbman_swp_enqueue_multiple(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- uint32_t *flags,
- int num_frames)
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
{
- uint32_t *p;
+ uint32_t *p = NULL;
const uint32_t *cl = qb_cl(d);
- uint32_t eqcr_ci, eqcr_pi;
- uint8_t diff;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
uint64_t addr_cena;
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
s->eqcr.ci = qbman_cena_read_reg(&s->sys,
- QBMAN_CENA_SWP_EQCR_CI) & 0xF;
- diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
- eqcr_ci, s->eqcr.ci);
- s->eqcr.available += diff;
- if (!diff)
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
return 0;
}
@@ -523,11 +756,10 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
/* Fill in the EQCR ring */
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], &fd[i], sizeof(*fd));
eqcr_pi++;
- eqcr_pi &= 0xF;
}
lwsync();
@@ -536,7 +768,7 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
eqcr_pi = s->eqcr.pi;
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
@@ -545,8 +777,7 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
- eqcr_pi &= 0xF;
- if (!(eqcr_pi & 7))
+ if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
@@ -554,35 +785,104 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
eqcr_pi = s->eqcr.pi;
addr_cena = (size_t)s->sys.addr_cena;
for (i = 0; i < num_enqueued; i++) {
- dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ dcbf((uintptr_t)(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
eqcr_pi++;
- eqcr_pi &= 0xF;
}
- s->eqcr.pi = eqcr_pi;
+ s->eqcr.pi = eqcr_pi & full_mask;
return num_enqueued;
}
-int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- int num_frames)
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ return num_enqueued;
+}
+
+inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
+}
+
+static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
{
uint32_t *p;
const uint32_t *cl;
- uint32_t eqcr_ci, eqcr_pi;
- uint8_t diff;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
uint64_t addr_cena;
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
s->eqcr.ci = qbman_cena_read_reg(&s->sys,
- QBMAN_CENA_SWP_EQCR_CI) & 0xF;
- diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
- eqcr_ci, s->eqcr.ci);
- s->eqcr.available += diff;
- if (!diff)
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
return 0;
}
@@ -593,12 +893,11 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
/* Fill in the EQCR ring */
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = qb_cl(&d[i]);
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], &fd[i], sizeof(*fd));
eqcr_pi++;
- eqcr_pi &= 0xF;
}
lwsync();
@@ -607,12 +906,11 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
eqcr_pi = s->eqcr.pi;
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = qb_cl(&d[i]);
p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++;
- eqcr_pi &= 0xF;
- if (!(eqcr_pi & 7))
+ if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
@@ -620,14 +918,78 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
eqcr_pi = s->eqcr.pi;
addr_cena = (size_t)s->sys.addr_cena;
for (i = 0; i < num_enqueued; i++) {
- dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ dcbf((uintptr_t)(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
+ eqcr_pi++;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
+static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = qb_cl(&d[i]);
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = qb_cl(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++;
- eqcr_pi &= 0xF;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
}
- s->eqcr.pi = eqcr_pi;
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
return num_enqueued;
}
+inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
+}
/*************************/
/* Static (push) dequeue */
@@ -670,6 +1032,7 @@ void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
#define QB_VDQCR_VERB_DT_SHIFT 2
#define QB_VDQCR_VERB_RLS_SHIFT 4
#define QB_VDQCR_VERB_WAE_SHIFT 5
+#define QB_VDQCR_VERB_RAD_SHIFT 6
enum qb_pull_dt_e {
qb_pull_dt_channel,
@@ -702,7 +1065,8 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
d->pull.rsp_addr = storage_phys;
}
-void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
+ uint8_t numframes)
{
d->pull.numf = numframes - 1;
}
@@ -735,7 +1099,20 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
d->pull.dq_src = chid;
}
-int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
+{
+ if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
+ if (rad)
+ d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
+ else
+ d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
+ } else {
+ printf("The RAD feature is not valid when RLS = 0\n");
+ }
+}
+
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
{
uint32_t *p;
uint32_t *cl = qb_cl(d);
@@ -759,6 +1136,36 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
return 0;
}
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+{
+ uint32_t *p;
+ uint32_t *cl = qb_cl(d);
+
+ if (!atomic_dec_and_test(&s->vdq.busy)) {
+ atomic_inc(&s->vdq.busy);
+ return -EBUSY;
+ }
+
+ d->pull.tok = s->sys.idx + 1;
+ s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
+ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
+ memcpy(&p[1], &cl[1], 12);
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+
+ return 0;
+}
+
+inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ return qbman_swp_pull_ptr(s, d);
+}
+
/****************/
/* Polling DQRR */
/****************/
@@ -791,7 +1198,12 @@ void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
-const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
+inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ return qbman_swp_dqrr_next_ptr(s);
+}
+
+const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
uint32_t verb;
uint32_t response_verb;
@@ -801,7 +1213,7 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
/* Before using valid-bit to detect if something is there, we have to
* handle the case of the DQRR reset bug...
*/
- if (unlikely(s->dqrr.reset_bug)) {
+ if (s->dqrr.reset_bug) {
/* We pick up new entries by cache-inhibited producer index,
* which means that a non-coherent mapping would require us to
* invalidate and read *only* once that PI has indicated that
@@ -833,7 +1245,8 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
}
p = qbman_cena_read_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+
verb = p->dq.verb;
/* If the valid-bit isn't of the expected polarity, nothing there. Note,
@@ -867,11 +1280,54 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
return p;
}
+const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+ uint32_t verb;
+ uint32_t response_verb;
+ uint32_t flags;
+ const struct qbman_result *p;
+
+ p = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+
+ verb = p->dq.verb;
+
+ /* If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip these
+ * checks, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
+ return NULL;
+
+ /* There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
+ s->dqrr.next_idx = 0;
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+ }
+ /* If this is the final response to a volatile dequeue command,
+ * indicate that the vdq is no longer busy
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & QBMAN_DQ_STAT_VOLATILE) &&
+ (flags & QBMAN_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.busy);
+ return p;
+}
+
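
Both DQRR variants feed the same consumer loop: qbman_swp_dqrr_next() returns NULL when the valid bit has the wrong polarity (nothing new), and every returned entry must later be acknowledged through DCAP. A hedged sketch of that loop using the public API:

static void
poll_dqrr(struct qbman_swp *swp)
{
	const struct qbman_result *dq;

	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		if (qbman_result_is_DQ(dq)) {
			const struct qbman_fd *fd = qbman_result_DQ_fd(dq);
			/* ... hand the frame descriptor to the datapath ... */
			(void)fd;
		}
		qbman_swp_dqrr_consume(swp, dq);	/* release the DQRR slot */
	}
}
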
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
const struct qbman_result *dq)
{
- qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+ qbman_cinh_write(&s->sys,
+ QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
@@ -884,6 +1340,7 @@ void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
/*********************************/
/* Polling user-provided storage */
/*********************************/
+
int qbman_result_has_new_result(struct qbman_swp *s,
struct qbman_result *dq)
{
@@ -898,11 +1355,11 @@ int qbman_result_has_new_result(struct qbman_swp *s,
((struct qbman_result *)dq)->dq.tok = 0;
/*
- * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
- * fact "VDQCR" shows busy doesn't mean that we hold the result that
- * makes it available. Eg. we may be looking at our 10th dequeue result,
- * having released VDQCR after the 1st result and it is now busy due to
- * some other command!
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because
+ * the fact "VDQCR" shows busy doesn't mean that we hold the result
+ * that makes it available. Eg. we may be looking at our 10th dequeue
+ * result, having released VDQCR after the 1st result and it is now
+ * busy due to some other command!
*/
if (s->vdq.storage == dq) {
s->vdq.storage = NULL;
@@ -936,11 +1393,11 @@ int qbman_check_command_complete(struct qbman_result *dq)
s = portal_idx_map[dq->dq.tok - 1];
/*
- * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
- * fact "VDQCR" shows busy doesn't mean that we hold the result that
- * makes it available. Eg. we may be looking at our 10th dequeue result,
- * having released VDQCR after the 1st result and it is now busy due to
- * some other command!
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because
+ * the fact "VDQCR" shows busy doesn't mean that we hold the result
+ * that makes it available. Eg. we may be looking at our 10th dequeue
+ * result, having released VDQCR after the 1st result and it is now
+ * busy due to some other command!
*/
if (s->vdq.storage == dq) {
s->vdq.storage = NULL;
@@ -1142,8 +1599,10 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
#define RAR_VB(rar) ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
- const uint64_t *buffers, unsigned int num_buffers)
+static int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers,
+ unsigned int num_buffers)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
@@ -1157,22 +1616,63 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
/* Start the release command */
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */
u64_to_le32_copy(&p[2], buffers, num_buffers);
- /* Set the verb byte, have to substitute in the valid-bit and the number
- * of buffers.
+ /* Set the verb byte, have to substitute in the valid-bit and the
+ * number of buffers.
*/
lwsync();
p[0] = cl[0] | RAR_VB(rar) | num_buffers;
qbman_cena_write_complete_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
return 0;
}
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers,
+ unsigned int num_buffers)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
+
+ pr_debug("RAR=%08x\n", rar);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
+
+ /* Start the release command */
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ u64_to_le32_copy(&p[2], buffers, num_buffers);
+
+ /* Set the verb byte, have to substitute in the valid-bit and the
+ * number of buffers.
+ */
+ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
+ lwsync();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
+
+ return 0;
+}
+
+inline int qbman_swp_release(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers,
+ unsigned int num_buffers)
+{
+ return qbman_swp_release_ptr(s, d, buffers, num_buffers);
+}
+
/*******************/
/* Buffer acquires */
/*******************/
@@ -1214,7 +1714,7 @@ int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
/* Complete the management command */
r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
- if (unlikely(!r)) {
+ if (!r) {
pr_err("qbman: acquire from BPID %d failed, no response\n",
bpid);
return -EIO;
@@ -1224,7 +1724,7 @@ int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
/* Determine success or failure */
- if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
bpid, r->rslt);
return -EIO;
@@ -1271,7 +1771,7 @@ static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
/* Complete the management command */
r = qbman_swp_mc_complete(s, p, alt_fq_verb);
- if (unlikely(!r)) {
+ if (!r) {
pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
alt_fq_verb);
return -EIO;
@@ -1281,7 +1781,7 @@ static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
/* Determine success or failure */
- if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
fqid, alt_fq_verb, r->rslt);
return -EIO;
@@ -1362,7 +1862,7 @@ static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
/* Complete the management command */
r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
- if (unlikely(!r)) {
+ if (!r) {
pr_err("qbman: wqchan config failed, no response\n");
return -EIO;
}
@@ -1372,7 +1872,7 @@ static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
!= QBMAN_WQCHAN_CONFIGURE);
/* Determine success or failure */
- if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
pr_err("CDAN cQID %d failed: code = 0x%02x\n",
channelid, r->rslt);
return -EIO;
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h
index dbea22a1..3b0fc540 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -1,12 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
*/
+#ifndef _QBMAN_PORTAL_H_
+#define _QBMAN_PORTAL_H_
+
#include "qbman_sys.h"
#include <fsl_qbman_portal.h>
+uint32_t qman_version;
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
@@ -14,13 +19,14 @@
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((uint32_t)0x80)
+/* All QBMan command use this "Read trigger bit" encoding */
+#define QB_RT_BIT ((uint32_t)0x100)
+
/* Management command result codes */
#define QBMAN_MC_RSLT_OK 0xf0
/* QBMan DQRR size is set at runtime in qbman_portal.c */
-#define QBMAN_EQCR_SIZE 8
-
static inline uint8_t qm_cyc_diff(uint8_t ringsize, uint8_t first,
uint8_t last)
{
@@ -51,6 +57,10 @@ struct qbman_swp {
#endif
uint32_t valid_bit; /* 0x00 or 0x80 */
} mc;
+ /* Management response */
+ struct {
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } mr;
/* Push dequeues */
uint32_t sdq;
/* Volatile dequeues */
@@ -87,6 +97,8 @@ struct qbman_swp {
struct {
uint32_t pi;
uint32_t pi_vb;
+ uint32_t pi_ring_size;
+ uint32_t pi_mask;
uint32_t ci;
int available;
} eqcr;
@@ -141,4 +153,16 @@ static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
* an inline) is necessary to work with different descriptor types and to work
* correctly with const and non-const inputs (and similarly-qualified outputs).
*/
-#define qb_cl(d) (&(d)->donot_manipulate_directly[0])
+#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
+
+#ifdef RTE_ARCH_ARM64
+ #define clean(p) \
+ { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
+ #define invalidate(p) \
+ { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
+#else
+ #define clean(p)
+ #define invalidate(p)
+#endif
+
+#endif /* _QBMAN_PORTAL_H_ */
diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h
index 2bd33ea5..d41af835 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -18,11 +18,51 @@
* *not* to provide linux compatibility.
*/
+#ifndef _QBMAN_SYS_H_
+#define _QBMAN_SYS_H_
+
#include "qbman_sys_decl.h"
#define CENA_WRITE_ENABLE 0
#define CINH_WRITE_ENABLE 1
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_CR_RT 0x900
+#define QBMAN_CINH_SWP_VDQCR_RT 0x940
+#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
+#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
+#define QBMAN_CINH_SWP_RCR_PI 0xc00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_ITPR 0xf40
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+
+/* CENA register offsets in memory-backed mode */
+#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_CR_MEM 0x1600
+#define QBMAN_CENA_SWP_RR_MEM 0x1680
+#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
+
/* Debugging assists */
static inline void __hexdump(unsigned long start, unsigned long end,
unsigned long p, size_t sz, const unsigned char *c)
@@ -125,8 +165,8 @@ struct qbman_swp_sys {
* place-holder.
*/
uint8_t *cena;
- uint8_t __iomem *addr_cena;
- uint8_t __iomem *addr_cinh;
+ uint8_t *addr_cena;
+ uint8_t *addr_cinh;
uint32_t idx;
enum qbman_eqcr_mode eqcr_mode;
};
@@ -292,13 +332,16 @@ static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
* qbman_portal.c. So use of it is declared locally here.
*/
#define QBMAN_CINH_SWP_CFG 0xd00
-#define QBMAN_CINH_SWP_CFG 0xd00
+
#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_VPM_SHIFT 7
+#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
@@ -329,11 +372,20 @@ static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
return reg;
}
+#define QMAN_RT_MODE 0x00000100
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+#define QMAN_REV_MASK 0xffff0000
+
static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
const struct qbman_swp_desc *d,
uint8_t dqrr_size)
{
uint32_t reg;
+ int i;
#ifdef RTE_ARCH_64
uint8_t wn = CENA_WRITE_ENABLE;
#else
@@ -343,7 +395,7 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
s->addr_cena = d->cena_bar;
s->addr_cinh = d->cinh_bar;
s->idx = (uint32_t)d->idx;
- s->cena = malloc(4096);
+ s->cena = malloc(64*1024);
if (!s->cena) {
pr_err("Could not allocate page for cena shadow\n");
return -1;
@@ -358,12 +410,34 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
QBMAN_BUG_ON(reg);
#endif
+ if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ memset(s->addr_cena, 0, 64*1024);
+ else {
+ /* Invalidate the portal memory.
+ * This ensures there are no stale cache lines
+ */
+ for (i = 0; i < 0x1000; i += 64)
+ dccivac(s->addr_cena + i);
+ }
+
if (s->eqcr_mode == qman_eqcr_vb_array)
- reg = qbman_set_swp_cfg(dqrr_size, wn, 0, 3, 2, 3, 1, 1, 1, 1,
- 1, 1);
- else
- reg = qbman_set_swp_cfg(dqrr_size, wn, 1, 3, 2, 2, 1, 1, 1, 1,
- 1, 1);
+ reg = qbman_set_swp_cfg(dqrr_size, wn,
+ 0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
+ else {
+ if ((d->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ reg = qbman_set_swp_cfg(dqrr_size, wn,
+ 1, 3, 2, 2, 1, 1, 1, 1, 1, 1);
+ else
+ reg = qbman_set_swp_cfg(dqrr_size, wn,
+ 1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
+ }
+
+ if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
+ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
+ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
+ }
+
qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
if (!reg) {
@@ -371,6 +445,12 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
free(s->cena);
return -1;
}
+
+ if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
+ qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
+ }
+
return 0;
}
@@ -378,3 +458,5 @@ static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
{
free(s->cena);
}
+
+#endif /* _QBMAN_SYS_H_ */
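
The memory-backed (QMan 5000+) bring-up thus differs from the legacy path in exactly three CFG bits plus the read-trigger priming of the PI registers. A sketch of the bit-OR step, using the shift names defined above (the wrapper name is illustrative):

static inline uint32_t
swp_cfg_mem_backed(uint32_t base_cfg)
{
	return base_cfg
		| 1 << SWP_CFG_CPBS_SHIFT	/* memory-backed portal */
		| 1 << SWP_CFG_VPM_SHIFT	/* VDQCR read-triggered */
		| 1 << SWP_CFG_CPM_SHIFT;	/* CR read-triggered */
}
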
diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
index fa6977fe..a29f5b46 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys_decl.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -3,6 +3,9 @@
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
*
*/
+#ifndef _QBMAN_SYS_DECL_H_
+#define _QBMAN_SYS_DECL_H_
+
#include <compat.h>
#include <fsl_qbman_base.h>
@@ -51,3 +54,4 @@ static inline void prefetch_for_store(void *p)
RTE_SET_USED(p);
}
#endif
+#endif /* _QBMAN_SYS_DECL_H_ */
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index fe45a113..dcc4e082 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -114,5 +114,18 @@ DPDK_18.05 {
dpdmai_open;
dpdmai_set_rx_queue;
rte_dpaa2_free_dpci_dev;
+ rte_dpaa2_memsegs;
} DPDK_18.02;
+
+DPDK_18.11 {
+ global:
+
+ dpaa2_dqrr_size;
+ dpaa2_eqcr_size;
+ dpci_get_link_state;
+ dpci_get_opr;
+ dpci_get_peer_attributes;
+ dpci_set_opr;
+
+} DPDK_18.05;
diff --git a/drivers/bus/ifpga/Makefile b/drivers/bus/ifpga/Makefile
index 3ff3bdb8..514452b3 100644
--- a/drivers/bus/ifpga/Makefile
+++ b/drivers/bus/ifpga/Makefile
@@ -19,7 +19,7 @@ LDLIBS += -lrte_kvargs
EXPORT_MAP := rte_bus_ifpga_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_bus.c
SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_common.c
diff --git a/drivers/bus/ifpga/ifpga_bus.c b/drivers/bus/ifpga/ifpga_bus.c
index b324872e..5f23ed8b 100644
--- a/drivers/bus/ifpga/ifpga_bus.c
+++ b/drivers/bus/ifpga/ifpga_bus.c
@@ -142,6 +142,7 @@ ifpga_scan_one(struct rte_rawdev *rawdev,
if (!afu_dev)
goto end;
+ afu_dev->device.bus = &rte_ifpga_bus;
afu_dev->device.devargs = devargs;
afu_dev->device.numa_node = SOCKET_ID_ANY;
afu_dev->device.name = devargs->name;
@@ -279,14 +280,13 @@ ifpga_probe_one_driver(struct rte_afu_driver *drv,
/* reference driver structure */
afu_dev->driver = drv;
- afu_dev->device.driver = &drv->driver;
/* call the driver probe() function */
ret = drv->probe(afu_dev);
- if (ret) {
+ if (ret)
afu_dev->driver = NULL;
- afu_dev->device.driver = NULL;
- }
+ else
+ afu_dev->device.driver = &drv->driver;
return ret;
}
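
The reordering above is a pattern applied across the buses in this
release: device.driver is assigned only once probe() has succeeded, so a
failed probe leaves no dangling driver pointer and rte_dev_is_probed()
stays accurate. A minimal sketch of the idiom, with hypothetical bus
types built on the real rte_dev.h structures:

#include <rte_dev.h>

struct my_driver { struct rte_driver driver; int (*probe)(void *dev); };
struct my_device { struct rte_device device; struct my_driver *driver; };

static int bus_probe_one(struct my_driver *drv, struct my_device *dev)
{
        int ret;

        dev->driver = drv;              /* bus-private back-pointer */
        ret = drv->probe(dev);
        if (ret != 0)
                dev->driver = NULL;     /* roll back on failure */
        else
                dev->device.driver = &drv->driver;  /* now "probed" */
        return ret;
}
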
@@ -301,8 +301,11 @@ ifpga_probe_all_drivers(struct rte_afu_device *afu_dev)
return -1;
/* Check if a driver is already loaded */
- if (afu_dev->driver != NULL)
- return 0;
+ if (rte_dev_is_probed(&afu_dev->device)) {
+ IFPGA_BUS_DEBUG("Device %s is already probed\n",
+ rte_ifpga_device_name(afu_dev));
+ return -EEXIST;
+ }
TAILQ_FOREACH(drv, &ifpga_afu_drv_list, next) {
if (ifpga_probe_one_driver(drv, afu_dev)) {
@@ -325,14 +328,13 @@ ifpga_probe(void)
int ret = 0;
TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
- if (afu_dev->device.driver)
- continue;
-
ret = ifpga_probe_all_drivers(afu_dev);
+ if (ret == -EEXIST)
+ continue;
if (ret < 0)
IFPGA_BUS_ERR("failed to initialize %s device\n",
rte_ifpga_device_name(afu_dev));
- }
+ }
return ret;
}
@@ -347,23 +349,20 @@ static int
ifpga_remove_driver(struct rte_afu_device *afu_dev)
{
const char *name;
- const struct rte_afu_driver *driver;
name = rte_ifpga_device_name(afu_dev);
- if (!afu_dev->device.driver) {
+ if (afu_dev->driver == NULL) {
IFPGA_BUS_DEBUG("no driver attach to device %s\n", name);
return 1;
}
- driver = RTE_DRV_TO_AFU_CONST(afu_dev->device.driver);
- return driver->remove(afu_dev);
+ return afu_dev->driver->remove(afu_dev);
}
static int
ifpga_unplug(struct rte_device *dev)
{
struct rte_afu_device *afu_dev = NULL;
- struct rte_devargs *devargs = NULL;
int ret;
if (dev == NULL)
@@ -373,15 +372,13 @@ ifpga_unplug(struct rte_device *dev)
if (!afu_dev)
return -ENOENT;
- devargs = dev->devargs;
-
ret = ifpga_remove_driver(afu_dev);
if (ret)
return ret;
TAILQ_REMOVE(&ifpga_afu_dev_list, afu_dev, next);
- rte_devargs_remove(devargs->bus->name, devargs->name);
+ rte_devargs_remove(dev->devargs);
free(afu_dev);
return 0;
diff --git a/drivers/bus/ifpga/meson.build b/drivers/bus/ifpga/meson.build
index c9b08c86..0b5c38d5 100644
--- a/drivers/bus/ifpga/meson.build
+++ b/drivers/bus/ifpga/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2018 Intel Corporation
+version = 2
+
deps += ['pci', 'kvargs', 'rawdev']
install_headers('rte_bus_ifpga.h')
sources = files('ifpga_common.c', 'ifpga_bus.c')
diff --git a/drivers/bus/ifpga/rte_bus_ifpga.h b/drivers/bus/ifpga/rte_bus_ifpga.h
index 51d5ae0d..d53c0f48 100644
--- a/drivers/bus/ifpga/rte_bus_ifpga.h
+++ b/drivers/bus/ifpga/rte_bus_ifpga.h
@@ -83,9 +83,6 @@ struct rte_afu_device {
#define RTE_DEV_TO_AFU(ptr) \
container_of(ptr, struct rte_afu_device, device)
-#define RTE_DRV_TO_AFU_CONST(ptr) \
- container_of(ptr, const struct rte_afu_driver, driver)
-
/**
* Initialization function for the driver called during FPGA BUS probing.
*/
diff --git a/drivers/bus/pci/Makefile b/drivers/bus/pci/Makefile
index cf373068..f33e0120 100644
--- a/drivers/bus/pci/Makefile
+++ b/drivers/bus/pci/Makefile
@@ -4,7 +4,7 @@
include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_bus_pci.a
-LIBABIVER := 1
+LIBABIVER := 2
EXPORT_MAP := rte_bus_pci_version.map
CFLAGS := -I$(SRCDIR) $(CFLAGS)
@@ -26,10 +26,11 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_ethdev -lrte_pci
+LDLIBS += -lrte_ethdev -lrte_pci -lrte_kvargs
include $(RTE_SDK)/drivers/bus/pci/$(SYSTEM)/Makefile
SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_params.c
SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common.c
SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common_uio.c
diff --git a/drivers/bus/pci/bsd/pci.c b/drivers/bus/pci/bsd/pci.c
index 655b34b7..d09f8ee5 100644
--- a/drivers/bus/pci/bsd/pci.c
+++ b/drivers/bus/pci/bsd/pci.c
@@ -223,6 +223,8 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
}
memset(dev, 0, sizeof(*dev));
+ dev->device.bus = &rte_pci_bus.bus;
+
dev->addr.domain = conf->pc_sel.pc_domain;
dev->addr.bus = conf->pc_sel.pc_bus;
dev->addr.devid = conf->pc_sel.pc_dev;
@@ -439,6 +441,8 @@ int rte_pci_read_config(const struct rte_pci_device *dev,
{
int fd = -1;
int size;
+ /* Copy Linux implementation's behaviour */
+ const int return_len = len;
struct pci_io pi = {
.pi_sel = {
.pc_domain = dev->addr.domain,
@@ -469,7 +473,7 @@ int rte_pci_read_config(const struct rte_pci_device *dev,
}
close(fd);
- return 0;
+ return return_len;
error:
if (fd >= 0)
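
With this change the BSD implementation matches Linux in returning the
number of bytes read instead of 0 on success. A short usage sketch under
the new convention (read_vendor_id is hypothetical; the vendor ID sits at
config-space offset 0):

#include <rte_bus_pci.h>

static int read_vendor_id(const struct rte_pci_device *dev, uint16_t *vendor)
{
        int ret = rte_pci_read_config(dev, vendor, sizeof(*vendor), 0);

        /* success is now signalled by the byte count, not 0 */
        return ret == (int)sizeof(*vendor) ? 0 : -1;
}
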
diff --git a/drivers/bus/pci/linux/Makefile b/drivers/bus/pci/linux/Makefile
index 96ea1d54..90404468 100644
--- a/drivers/bus/pci/linux/Makefile
+++ b/drivers/bus/pci/linux/Makefile
@@ -4,5 +4,3 @@
SRCS += pci.c
SRCS += pci_uio.c
SRCS += pci_vfio.c
-
-CFLAGS += -D_GNU_SOURCE
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index 04648ac9..45c24ef7 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -119,7 +119,7 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
- size_t sz = msl->memseg_arr.len * msl->page_sz;
+ size_t sz = msl->len;
void *end_va = RTE_PTR_ADD(msl->base_va, sz);
void **max_va = arg;
@@ -228,6 +228,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
return -1;
memset(dev, 0, sizeof(*dev));
+ dev->device.bus = &rte_pci_bus.bus;
dev->addr = *addr;
/* get vendor id */
@@ -588,10 +589,8 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev)
fclose(fp);
mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
- if (mgaw < X86_VA_WIDTH)
- return false;
- return true;
+ return rte_eal_check_dma_mask(mgaw) == 0;
}
#elif defined(RTE_ARCH_PPC_64)
static bool
@@ -620,8 +619,11 @@ pci_devices_iommu_support_va(void)
FOREACH_DEVICE_ON_PCIBUS(dev) {
if (!rte_pci_match(drv, dev))
continue;
- if (!pci_one_device_iommu_support_va(dev))
- return false;
+ /*
+ * only one PCI device needs to be checked, because
+ * the IOMMU hardware is the same for all of them.
+ */
+ return pci_one_device_iommu_support_va(dev);
}
}
return true;
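
rte_eal_check_dma_mask() (experimental in 18.11) reports whether all
memory currently mapped by the EAL is addressable within the given mask
width, which is what the VT-d MGAW test above now delegates to. A hedged
usage sketch (assuming the declaration lives in rte_memory.h in this
release):

#include <rte_memory.h>

/* Hypothetical helper: accept IOVA-as-VA only if an IOMMU decoding
 * 'maskbits' bits can reach every mapped address.
 */
static int iommu_width_ok(uint8_t maskbits)
{
        return rte_eal_check_dma_mask(maskbits) == 0;
}
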
@@ -672,23 +674,21 @@ rte_pci_get_iommu_class(void)
int rte_pci_read_config(const struct rte_pci_device *device,
void *buf, size_t len, off_t offset)
{
+ char devname[RTE_DEV_NAME_MAX_LEN] = "";
const struct rte_intr_handle *intr_handle = &device->intr_handle;
- switch (intr_handle->type) {
- case RTE_INTR_HANDLE_UIO:
- case RTE_INTR_HANDLE_UIO_INTX:
+ switch (device->kdrv) {
+ case RTE_KDRV_IGB_UIO:
return pci_uio_read_config(intr_handle, buf, len, offset);
-
#ifdef VFIO_PRESENT
- case RTE_INTR_HANDLE_VFIO_MSIX:
- case RTE_INTR_HANDLE_VFIO_MSI:
- case RTE_INTR_HANDLE_VFIO_LEGACY:
+ case RTE_KDRV_VFIO:
return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
default:
+ rte_pci_device_name(&device->addr, devname,
+ RTE_DEV_NAME_MAX_LEN);
RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ "Unknown driver type for %s\n", devname);
return -1;
}
}
@@ -697,23 +697,21 @@ int rte_pci_read_config(const struct rte_pci_device *device,
int rte_pci_write_config(const struct rte_pci_device *device,
const void *buf, size_t len, off_t offset)
{
+ char devname[RTE_DEV_NAME_MAX_LEN] = "";
const struct rte_intr_handle *intr_handle = &device->intr_handle;
- switch (intr_handle->type) {
- case RTE_INTR_HANDLE_UIO:
- case RTE_INTR_HANDLE_UIO_INTX:
+ switch (device->kdrv) {
+ case RTE_KDRV_IGB_UIO:
return pci_uio_write_config(intr_handle, buf, len, offset);
-
#ifdef VFIO_PRESENT
- case RTE_INTR_HANDLE_VFIO_MSIX:
- case RTE_INTR_HANDLE_VFIO_MSI:
- case RTE_INTR_HANDLE_VFIO_LEGACY:
+ case RTE_KDRV_VFIO:
return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
default:
+ rte_pci_device_name(&device->addr, devname,
+ RTE_DEV_NAME_MAX_LEN);
RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ "Unknown driver type for %s\n", devname);
return -1;
}
}
diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index 686386d6..305cc060 100644
--- a/drivers/bus/pci/linux/pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -17,6 +17,8 @@
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_vfio.h>
+#include <rte_eal.h>
+#include <rte_bus.h>
#include "eal_filesystem.h"
@@ -35,7 +37,9 @@
#ifdef VFIO_PRESENT
+#ifndef PAGE_SIZE
#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#endif
#define PAGE_MASK (~(PAGE_SIZE - 1))
static struct rte_tailq_elem rte_vfio_tailq = {
@@ -277,6 +281,114 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
return -1;
}
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+static void
+pci_vfio_req_handler(void *param)
+{
+ struct rte_bus *bus;
+ int ret;
+ struct rte_device *device = (struct rte_device *)param;
+
+ bus = rte_bus_find_by_device(device);
+ if (bus == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find bus for device (%s)\n",
+ device->name);
+ return;
+ }
+
+ /*
+ * The vfio kernel module requests user space to release the allocated
+ * resources before the device is deleted in the kernel, so the request
+ * can be handled directly by the vfio bus hot-unplug handler.
+ */
+ ret = bus->hot_unplug_handler(device);
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "Can not handle hot-unplug for device (%s)\n",
+ device->name);
+}
+
+/* enable notifier (only enable req now) */
+static int
+pci_vfio_enable_notifier(struct rte_pci_device *dev, int vfio_dev_fd)
+{
+ int ret;
+ int fd = -1;
+
+ /* set up an eventfd for req notifier */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot set up eventfd, error %i (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ dev->vfio_req_intr_handle.fd = fd;
+ dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_VFIO_REQ;
+ dev->vfio_req_intr_handle.vfio_dev_fd = vfio_dev_fd;
+
+ ret = rte_intr_callback_register(&dev->vfio_req_intr_handle,
+ pci_vfio_req_handler,
+ (void *)&dev->device);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Fail to register req notifier handler.\n");
+ goto error;
+ }
+
+ ret = rte_intr_enable(&dev->vfio_req_intr_handle);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Fail to enable req notifier.\n");
+ ret = rte_intr_callback_unregister(&dev->vfio_req_intr_handle,
+ pci_vfio_req_handler,
+ (void *)&dev->device);
+ if (ret < 0)
+ RTE_LOG(ERR, EAL,
+ "Fail to unregister req notifier handler.\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ close(fd);
+
+ dev->vfio_req_intr_handle.fd = -1;
+ dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ dev->vfio_req_intr_handle.vfio_dev_fd = -1;
+
+ return -1;
+}
+
+/* disable notifier (only disable req now) */
+static int
+pci_vfio_disable_notifier(struct rte_pci_device *dev)
+{
+ int ret;
+
+ ret = rte_intr_disable(&dev->vfio_req_intr_handle);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "fail to disable req notifier.\n");
+ return -1;
+ }
+
+ ret = rte_intr_callback_unregister(&dev->vfio_req_intr_handle,
+ pci_vfio_req_handler,
+ (void *)&dev->device);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "fail to unregister req notifier handler.\n");
+ return -1;
+ }
+
+ close(dev->vfio_req_intr_handle.fd);
+
+ dev->vfio_req_intr_handle.fd = -1;
+ dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ dev->vfio_req_intr_handle.vfio_dev_fd = -1;
+
+ return 0;
+}
+#endif
+
static int
pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index)
{
@@ -415,6 +527,93 @@ pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
return 0;
}
+/*
+ * region info may contain capability headers, so we need to keep reallocating
+ * the memory until we match allocated memory size with argsz.
+ */
+static int
+pci_vfio_get_region_info(int vfio_dev_fd, struct vfio_region_info **info,
+ int region)
+{
+ struct vfio_region_info *ri;
+ size_t argsz = sizeof(*ri);
+ int ret;
+
+ ri = malloc(sizeof(*ri));
+ if (ri == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memory for region info\n");
+ return -1;
+ }
+again:
+ memset(ri, 0, argsz);
+ ri->argsz = argsz;
+ ri->index = region;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, ri);
+ if (ret < 0) {
+ free(ri);
+ return ret;
+ }
+ if (ri->argsz != argsz) {
+ struct vfio_region_info *tmp;
+
+ argsz = ri->argsz;
+ tmp = realloc(ri, argsz);
+
+ if (tmp == NULL) {
+ /* realloc failed; ri is still valid and is freed below */
+ free(ri);
+ RTE_LOG(ERR, EAL, "Cannot reallocate memory for region info\n");
+ return -1;
+ }
+ ri = tmp;
+ goto again;
+ }
+ *info = ri;
+
+ return 0;
+}
+
+static struct vfio_info_cap_header *
+pci_vfio_info_cap(struct vfio_region_info *info, int cap)
+{
+ struct vfio_info_cap_header *h;
+ size_t offset;
+
+ if ((info->flags & RTE_VFIO_INFO_FLAG_CAPS) == 0) {
+ /* VFIO info does not advertise capabilities */
+ return NULL;
+ }
+
+ offset = VFIO_CAP_OFFSET(info);
+ while (offset != 0) {
+ h = RTE_PTR_ADD(info, offset);
+ if (h->id == cap)
+ return h;
+ offset = h->next;
+ }
+ return NULL;
+}
+
+static int
+pci_vfio_msix_is_mappable(int vfio_dev_fd, int msix_region)
+{
+ struct vfio_region_info *info;
+ int ret;
+
+ ret = pci_vfio_get_region_info(vfio_dev_fd, &info, msix_region);
+ if (ret < 0)
+ return -1;
+
+ ret = pci_vfio_info_cap(info, RTE_VFIO_CAP_MSIX_MAPPABLE) != NULL;
+
+ /* cleanup */
+ free(info);
+
+ return ret;
+}
+
+
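
pci_vfio_get_region_info() above implements the standard VFIO argsz
re-query loop: when capability headers are present, the first
VFIO_DEVICE_GET_REGION_INFO call reports a larger argsz, and the buffer
is regrown until the sizes agree. A usage sketch in the same vein
(pci_vfio_dump_bar0 is hypothetical):

static void pci_vfio_dump_bar0(int vfio_dev_fd)
{
        struct vfio_region_info *info = NULL;

        if (pci_vfio_get_region_info(vfio_dev_fd, &info, 0) < 0)
                return;
        RTE_LOG(DEBUG, EAL, "BAR0: size=0x%llx offset=0x%llx\n",
                (unsigned long long)info->size,
                (unsigned long long)info->offset);
        free(info);
}
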
static int
pci_vfio_map_resource_primary(struct rte_pci_device *dev)
{
@@ -430,6 +629,9 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
struct pci_map *maps;
dev->intr_handle.fd = -1;
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ dev->vfio_req_intr_handle.fd = -1;
+#endif
/* store PCI address string */
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
@@ -464,56 +666,75 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
if (ret < 0) {
RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n",
pci_addr);
- goto err_vfio_dev_fd;
+ goto err_vfio_res;
+ }
+ /* if we found our MSI-X BAR region, check if we can mmap it */
+ if (vfio_res->msix_table.bar_index != -1) {
+ int ret = pci_vfio_msix_is_mappable(vfio_dev_fd,
+ vfio_res->msix_table.bar_index);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Couldn't check if MSI-X BAR is mappable\n");
+ goto err_vfio_res;
+ } else if (ret != 0) {
+ /* we can map it, so we don't care where it is */
+ RTE_LOG(DEBUG, EAL, "VFIO reports MSI-X BAR as mappable\n");
+ vfio_res->msix_table.bar_index = -1;
+ }
}
for (i = 0; i < (int) vfio_res->nb_maps; i++) {
- struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ struct vfio_region_info *reg = NULL;
void *bar_addr;
- reg.index = i;
-
- ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
- if (ret) {
+ ret = pci_vfio_get_region_info(vfio_dev_fd, &reg, i);
+ if (ret < 0) {
RTE_LOG(ERR, EAL, " %s cannot get device region info "
- "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ "error %i (%s)\n", pci_addr, errno,
+ strerror(errno));
goto err_vfio_res;
}
/* chk for io port region */
ret = pci_vfio_is_ioport_bar(vfio_dev_fd, i);
- if (ret < 0)
+ if (ret < 0) {
+ free(reg);
goto err_vfio_res;
- else if (ret) {
+ } else if (ret) {
RTE_LOG(INFO, EAL, "Ignore mapping IO port bar(%d)\n",
i);
+ free(reg);
continue;
}
/* skip non-mmapable BARs */
- if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
+ if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
+ free(reg);
continue;
+ }
/* try mapping somewhere close to the end of hugepages */
if (pci_map_addr == NULL)
pci_map_addr = pci_find_max_end_va();
bar_addr = pci_map_addr;
- pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
+ pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg->size);
maps[i].addr = bar_addr;
- maps[i].offset = reg.offset;
- maps[i].size = reg.size;
+ maps[i].offset = reg->offset;
+ maps[i].size = reg->size;
maps[i].path = NULL; /* vfio doesn't have per-resource paths */
ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
if (ret < 0) {
RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n",
pci_addr, i, strerror(errno));
+ free(reg);
goto err_vfio_res;
}
dev->mem_resource[i].addr = maps[i].addr;
+
+ free(reg);
}
if (pci_rte_vfio_setup_device(dev, vfio_dev_fd) < 0) {
@@ -521,6 +742,13 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
goto err_vfio_res;
}
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ if (pci_vfio_enable_notifier(dev, vfio_dev_fd) != 0) {
+ RTE_LOG(ERR, EAL, "Error setting up notifier!\n");
+ goto err_vfio_res;
+ }
+
+#endif
TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);
return 0;
@@ -546,6 +774,9 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
struct pci_map *maps;
dev->intr_handle.fd = -1;
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ dev->vfio_req_intr_handle.fd = -1;
+#endif
/* store PCI address string */
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
@@ -586,6 +817,9 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
/* we need save vfio_dev_fd, so it can be used during release */
dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ dev->vfio_req_intr_handle.vfio_dev_fd = vfio_dev_fd;
+#endif
return 0;
err_vfio_dev_fd:
@@ -658,6 +892,14 @@ pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
loc->domain, loc->bus, loc->devid, loc->function);
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ ret = pci_vfio_disable_notifier(dev);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "fail to disable req notifier.\n");
+ return -1;
+ }
+
+#endif
if (close(dev->intr_handle.fd) < 0) {
RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
pci_addr);
diff --git a/drivers/bus/pci/meson.build b/drivers/bus/pci/meson.build
index 72939e59..a3140ff9 100644
--- a/drivers/bus/pci/meson.build
+++ b/drivers/bus/pci/meson.build
@@ -1,15 +1,18 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
+
deps += ['pci']
install_headers('rte_bus_pci.h')
-sources = files('pci_common.c', 'pci_common_uio.c')
+sources = files('pci_common.c',
+ 'pci_common_uio.c',
+ 'pci_params.c')
if host_machine.system() == 'linux'
sources += files('linux/pci.c',
'linux/pci_uio.c',
'linux/pci_vfio.c')
includes += include_directories('linux')
- cflags += ['-D_GNU_SOURCE']
else
sources += files('bsd/pci.c')
includes += include_directories('bsd')
@@ -17,3 +20,5 @@ endif
# memseg walk is not part of stable API yet
allow_experimental_apis = true
+
+deps += ['kvargs']
diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index 7736b3f9..6276e5d6 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -6,6 +6,7 @@
#include <string.h>
#include <inttypes.h>
#include <stdint.h>
+#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/queue.h>
@@ -23,12 +24,11 @@
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>
+#include <rte_vfio.h>
#include "private.h"
-extern struct rte_pci_bus rte_pci_bus;
-
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
const char *rte_pci_get_sysfs_path(void)
@@ -123,6 +123,7 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
struct rte_pci_device *dev)
{
int ret;
+ bool already_probed;
struct rte_pci_addr *loc;
if ((dr == NULL) || (dev == NULL))
@@ -153,6 +154,13 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
dev->device.numa_node = 0;
}
+ already_probed = rte_dev_is_probed(&dev->device);
+ if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
+ RTE_LOG(DEBUG, EAL, "Device %s is already probed\n",
+ dev->device.name);
+ return -EEXIST;
+ }
+
RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
dev->id.device_id, dr->driver.name);
@@ -161,24 +169,24 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
* This needs to be before rte_pci_map_device(), as it enables to use
* driver flags for adjusting configuration.
*/
- dev->driver = dr;
- dev->device.driver = &dr->driver;
+ if (!already_probed)
+ dev->driver = dr;
- if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+ if (!already_probed && (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)) {
/* map resources for devices that use igb_uio */
ret = rte_pci_map_device(dev);
if (ret != 0) {
dev->driver = NULL;
- dev->device.driver = NULL;
return ret;
}
}
/* call the driver probe() function */
ret = dr->probe(dr, dev);
+ if (already_probed)
+ return ret; /* no rollback if already succeeded earlier */
if (ret) {
dev->driver = NULL;
- dev->device.driver = NULL;
if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
/* Don't unmap if device is unsupported and
* driver needs mapped resources.
@@ -186,6 +194,8 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
!(ret > 0 &&
(dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
rte_pci_unmap_device(dev);
+ } else {
+ dev->device.driver = &dr->driver;
}
return ret;
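
RTE_PCI_DRV_PROBE_AGAIN lets a driver opt in to re-probing of an
already-probed device (e.g. to discover new ports); without the flag the
bus now returns -EEXIST instead of silently skipping. A sketch of a
driver declaring the flag (all names and IDs hypothetical):

#include <rte_bus_pci.h>

static int example_probe(struct rte_pci_driver *dr, struct rte_pci_device *dev)
{
        (void)dr; (void)dev;
        return 0;       /* a re-probe would rescan for new ports here */
}

static int example_remove(struct rte_pci_device *dev)
{
        (void)dev;
        return 0;
}

static const struct rte_pci_id example_ids[] = {
        { RTE_PCI_DEVICE(0x8086, 0x1234) }, /* hypothetical vendor/device */
        { .vendor_id = 0 },                 /* sentinel */
};

static struct rte_pci_driver example_pmd = {
        .id_table = example_ids,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_PROBE_AGAIN,
        .probe = example_probe,
        .remove = example_remove,
};
RTE_PMD_REGISTER_PCI(net_example, example_pmd);
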
@@ -233,7 +243,7 @@ rte_pci_detach_dev(struct rte_pci_device *dev)
/*
* If vendor/device ID match, call the probe() function of all
- * registered driver for the given device. Return -1 if initialization
+ * registered driver for the given device. Return < 0 if initialization
* failed, return 1 if no driver is found for this device.
*/
static int
@@ -243,17 +253,13 @@ pci_probe_all_drivers(struct rte_pci_device *dev)
int rc = 0;
if (dev == NULL)
- return -1;
-
- /* Check if a driver is already loaded */
- if (dev->driver != NULL)
- return 0;
+ return -EINVAL;
FOREACH_DRIVER_ON_PCIBUS(dr) {
rc = rte_pci_probe_one_driver(dr, dev);
if (rc < 0)
/* negative value is an error */
- return -1;
+ return rc;
if (rc > 0)
/* positive value means driver doesn't support it */
continue;
@@ -290,11 +296,14 @@ rte_pci_probe(void)
devargs->policy == RTE_DEV_WHITELISTED)
ret = pci_probe_all_drivers(dev);
if (ret < 0) {
- RTE_LOG(ERR, EAL, "Requested device " PCI_PRI_FMT
- " cannot be used\n", dev->addr.domain, dev->addr.bus,
- dev->addr.devid, dev->addr.function);
- rte_errno = errno;
- failed++;
+ if (ret != -EEXIST) {
+ RTE_LOG(ERR, EAL, "Requested device "
+ PCI_PRI_FMT " cannot be used\n",
+ dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ rte_errno = errno;
+ failed++;
+ }
ret = 0;
}
}
@@ -405,6 +414,98 @@ pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
return NULL;
}
+/*
+ * Find the device which encountered the failure by iterating over all
+ * devices on the PCI bus and checking whether the memory failure address
+ * falls within the range of one of the device's BARs.
+ */
+static struct rte_pci_device *
+pci_find_device_by_addr(const void *failure_addr)
+{
+ struct rte_pci_device *pdev = NULL;
+ uint64_t check_point, start, end, len;
+ int i;
+
+ check_point = (uint64_t)(uintptr_t)failure_addr;
+
+ FOREACH_DEVICE_ON_PCIBUS(pdev) {
+ for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) {
+ start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr;
+ len = pdev->mem_resource[i].len;
+ end = start + len;
+ if (check_point >= start && check_point < end) {
+ RTE_LOG(DEBUG, EAL, "Failure address %16.16"
+ PRIx64" belongs to device %s!\n",
+ check_point, pdev->device.name);
+ return pdev;
+ }
+ }
+ }
+ return NULL;
+}
+
+static int
+pci_hot_unplug_handler(struct rte_device *dev)
+{
+ struct rte_pci_device *pdev = NULL;
+ int ret = 0;
+
+ pdev = RTE_DEV_TO_PCI(dev);
+ if (!pdev)
+ return -1;
+
+ switch (pdev->kdrv) {
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ case RTE_KDRV_VFIO:
+ /*
+ * The vfio kernel module guarantees the PCI device will not be
+ * deleted until user space releases its resources, so there is no
+ * need to remap the BAR resources here; just forward the req event
+ * to user space to handle.
+ */
+ rte_dev_event_callback_process(dev->name,
+ RTE_DEV_EVENT_REMOVE);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ case RTE_KDRV_NIC_UIO:
+ /* The BAR resources are invalid; remap them to be safe. */
+ ret = pci_uio_remap_resource(pdev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ "Not managed by a supported kernel driver, skipped\n");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+pci_sigbus_handler(const void *failure_addr)
+{
+ struct rte_pci_device *pdev = NULL;
+ int ret = 0;
+
+ pdev = pci_find_device_by_addr(failure_addr);
+ if (!pdev) {
+ /* It is a generic sigbus error; no bus will handle it. */
+ ret = 1;
+ } else {
+ /* The sigbus error was caused by a hot-unplug. */
+ ret = pci_hot_unplug_handler(&pdev->device);
+ if (ret) {
+ RTE_LOG(ERR, EAL,
+ "Failed to handle hot-unplug for device %s",
+ pdev->name);
+ ret = -1;
+ }
+ }
+ return ret;
+}
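
Together these handlers give the bus a complete failure path: a SIGBUS on
a mapped BAR is traced back to its device, UIO BARs are remapped to
anonymous memory, and VFIO devices raise a REMOVE event. On the
application side the experimental 18.11 hotplug API ties into this; a
hedged sketch (assuming the rte_dev.h signatures of this release):

#include <rte_dev.h>
#include <rte_log.h>

static void on_dev_event(const char *dev_name,
                         enum rte_dev_event_type type, void *arg)
{
        (void)arg;
        if (type == RTE_DEV_EVENT_REMOVE)
                RTE_LOG(WARNING, EAL, "device %s removed\n", dev_name);
}

static int enable_hotplug_handling(void)
{
        if (rte_dev_event_monitor_start() < 0)
                return -1;
        if (rte_dev_hotplug_handle_enable() < 0) /* installs sigbus handler */
                return -1;
        /* NULL device name subscribes to events from all devices */
        return rte_dev_event_callback_register(NULL, on_dev_event, NULL);
}
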
+
static int
pci_plug(struct rte_device *dev)
{
@@ -421,6 +522,7 @@ pci_unplug(struct rte_device *dev)
ret = rte_pci_detach_dev(pdev);
if (ret == 0) {
rte_pci_remove_device(pdev);
+ rte_devargs_remove(dev->devargs);
free(pdev);
}
return ret;
@@ -435,6 +537,9 @@ struct rte_pci_bus rte_pci_bus = {
.unplug = pci_unplug,
.parse = pci_parse,
.get_iommu_class = rte_pci_get_iommu_class,
+ .dev_iterate = rte_pci_dev_iterate,
+ .hot_unplug_handler = pci_hot_unplug_handler,
+ .sigbus_handler = pci_sigbus_handler,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
diff --git a/drivers/bus/pci/pci_common_uio.c b/drivers/bus/pci/pci_common_uio.c
index 54bc20b5..7ea73dbc 100644
--- a/drivers/bus/pci/pci_common_uio.c
+++ b/drivers/bus/pci/pci_common_uio.c
@@ -146,6 +146,39 @@ pci_uio_unmap(struct mapped_pci_resource *uio_res)
}
}
+/* remap the PCI resource of a PCI device in anonymous virtual memory */
+int
+pci_uio_remap_resource(struct rte_pci_device *dev)
+{
+ int i;
+ void *map_address;
+
+ if (dev == NULL)
+ return -1;
+
+ /* Remap all BARs */
+ for (i = 0; i != PCI_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ if (dev->mem_resource[i].phys_addr == 0)
+ continue;
+ map_address = mmap(dev->mem_resource[i].addr,
+ (size_t)dev->mem_resource[i].len,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (map_address == MAP_FAILED) {
+ RTE_LOG(ERR, EAL,
+ "Cannot remap resource for device %s\n",
+ dev->name);
+ return -1;
+ }
+ RTE_LOG(INFO, EAL,
+ "Successful remap resource for device %s\n",
+ dev->name);
+ }
+
+ return 0;
+}
+
static struct mapped_pci_resource *
pci_uio_find_resource(struct rte_pci_device *dev)
{
diff --git a/drivers/bus/pci/pci_params.c b/drivers/bus/pci/pci_params.c
new file mode 100644
index 00000000..3192e9c9
--- /dev/null
+++ b/drivers/bus/pci/pci_params.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Gaëtan Rivet
+ */
+
+#include <rte_bus.h>
+#include <rte_bus_pci.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_kvargs.h>
+#include <rte_pci.h>
+
+#include "private.h"
+
+enum pci_params {
+ RTE_PCI_PARAM_ADDR,
+ RTE_PCI_PARAM_MAX,
+};
+
+static const char * const pci_params_keys[] = {
+ [RTE_PCI_PARAM_ADDR] = "addr",
+ [RTE_PCI_PARAM_MAX] = NULL,
+};
+
+static int
+pci_addr_kv_cmp(const char *key __rte_unused,
+ const char *value,
+ void *_addr2)
+{
+ struct rte_pci_addr _addr1;
+ struct rte_pci_addr *addr1 = &_addr1;
+ struct rte_pci_addr *addr2 = _addr2;
+
+ if (rte_pci_addr_parse(value, addr1))
+ return -1;
+ return -abs(rte_pci_addr_cmp(addr1, addr2));
+}
+
+static int
+pci_dev_match(const struct rte_device *dev,
+ const void *_kvlist)
+{
+ const struct rte_kvargs *kvlist = _kvlist;
+ const struct rte_pci_device *pdev;
+
+ if (kvlist == NULL)
+ /* Empty string matches everything. */
+ return 0;
+ pdev = RTE_DEV_TO_PCI_CONST(dev);
+ /* Report a mismatch if any field does not match. */
+ if (rte_kvargs_process(kvlist, pci_params_keys[RTE_PCI_PARAM_ADDR],
+ &pci_addr_kv_cmp,
+ (void *)(intptr_t)&pdev->addr))
+ return 1;
+ return 0;
+}
+
+void *
+rte_pci_dev_iterate(const void *start,
+ const char *str,
+ const struct rte_dev_iterator *it __rte_unused)
+{
+ rte_bus_find_device_t find_device;
+ struct rte_kvargs *kvargs = NULL;
+ struct rte_device *dev;
+
+ if (str != NULL) {
+ kvargs = rte_kvargs_parse(str, pci_params_keys);
+ if (kvargs == NULL) {
+ RTE_LOG(ERR, EAL, "cannot parse argument list\n");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ }
+ find_device = rte_pci_bus.bus.find_device;
+ dev = find_device(start, pci_dev_match, kvargs);
+ rte_kvargs_free(kvargs);
+ return dev;
+}
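
pci_dev_match() plugs the PCI bus into the new generic device iteration:
the "addr" kvarg is parsed and compared with rte_pci_addr_cmp(), so a
devargs-style string selects devices by address. A usage sketch with the
experimental 18.11 iterator API (the PCI address is hypothetical):

#include <stdio.h>
#include <rte_dev.h>

static void list_matching_pci(void)
{
        struct rte_dev_iterator it;
        struct rte_device *dev;

        RTE_DEV_FOREACH(dev, "bus=pci,addr=0000:06:00.0", &it)
                printf("matched: %s\n", dev->name);
}
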
diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h
index 8ddd03e1..13c3324b 100644
--- a/drivers/bus/pci/private.h
+++ b/drivers/bus/pci/private.h
@@ -10,9 +10,11 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
+extern struct rte_pci_bus rte_pci_bus;
+
struct rte_pci_driver;
struct rte_pci_device;
/**
* Probe the PCI bus
*
@@ -123,6 +127,18 @@ void pci_uio_free_resource(struct rte_pci_device *dev,
struct mapped_pci_resource *uio_res);
/**
+ * Remap the PCI resource of a PCI device in anonymous virtual memory.
+ *
+ * @param dev
+ * Pointer to the struct rte_pci_device.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+pci_uio_remap_resource(struct rte_pci_device *dev);
+
+/**
* Map device memory to uio resource
*
* This function is private to EAL.
@@ -166,4 +182,27 @@ rte_pci_match(const struct rte_pci_driver *pci_drv,
enum rte_iova_mode
rte_pci_get_iommu_class(void);
+/*
+ * Iterate over internal devices,
+ * matching any device against the provided
+ * string.
+ *
+ * @param start
+ * Iteration starting point.
+ *
+ * @param str
+ * Device string to match against.
+ *
+ * @param it
+ * (unused) iterator structure.
+ *
+ * @return
+ * A pointer to the next matching device if any.
+ * NULL otherwise.
+ */
+void *
+rte_pci_dev_iterate(const void *start,
+ const char *str,
+ const struct rte_dev_iterator *it);
+
#endif /* _PCI_PRIVATE_H_ */
diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
index 0d1955ff..f0d6d81c 100644
--- a/drivers/bus/pci/rte_bus_pci.h
+++ b/drivers/bus/pci/rte_bus_pci.h
@@ -62,10 +62,12 @@ struct rte_pci_device {
struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
/**< PCI Memory Resource */
struct rte_intr_handle intr_handle; /**< Interrupt handle */
- struct rte_pci_driver *driver; /**< Associated driver */
+ struct rte_pci_driver *driver; /**< PCI driver used in probing */
uint16_t max_vfs; /**< sriov enable if not zero */
enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
char name[PCI_PRI_STR_SIZE+1]; /**< PCI location (ASCII) */
+ struct rte_intr_handle vfio_req_intr_handle;
+ /**< Handler of VFIO request interrupt */
};
/**
@@ -121,7 +123,7 @@ struct rte_pci_driver {
pci_probe_t *probe; /**< Device Probe function. */
pci_remove_t *remove; /**< Device Remove function. */
const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
- uint32_t drv_flags; /**< Flags contolling handling of device. */
+ uint32_t drv_flags; /**< Flags RTE_PCI_DRV_*. */
};
/**
@@ -137,6 +139,8 @@ struct rte_pci_bus {
#define RTE_PCI_DRV_NEED_MAPPING 0x0001
/** Device needs PCI BAR mapping with enabled write combining (wc) */
#define RTE_PCI_DRV_WC_ACTIVATE 0x0002
+/** Device already probed can be probed again to check for new ports. */
+#define RTE_PCI_DRV_PROBE_AGAIN 0x0004
/** Device driver supports link state interrupt */
#define RTE_PCI_DRV_INTR_LSC 0x0008
/** Device driver supports device removal interrupt */
@@ -219,6 +223,8 @@ void rte_pci_unregister(struct rte_pci_driver *driver);
* The length of the data buffer.
* @param offset
* The offset into PCI config space
+ * @return
+ * Number of bytes read on success, negative on error.
*/
int rte_pci_read_config(const struct rte_pci_device *device,
void *buf, size_t len, off_t offset);
diff --git a/drivers/bus/vdev/Makefile b/drivers/bus/vdev/Makefile
index bd0bb895..803b8ea7 100644
--- a/drivers/bus/vdev/Makefile
+++ b/drivers/bus/vdev/Makefile
@@ -16,11 +16,12 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
EXPORT_MAP := rte_bus_vdev_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
SRCS-y += vdev.c
+SRCS-y += vdev_params.c
-LDLIBS += -lrte_eal
+LDLIBS += -lrte_eal -lrte_kvargs
#
# Export include files
diff --git a/drivers/bus/vdev/meson.build b/drivers/bus/vdev/meson.build
index 2ee648b4..803785f1 100644
--- a/drivers/bus/vdev/meson.build
+++ b/drivers/bus/vdev/meson.build
@@ -1,7 +1,12 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-sources = files('vdev.c')
+version = 2
+
+sources = files('vdev.c',
+ 'vdev_params.c')
install_headers('rte_bus_vdev.h')
allow_experimental_apis = true
+
+deps += ['kvargs']
diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
index 6139dd55..9c66bdc7 100644
--- a/drivers/bus/vdev/vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -23,6 +23,7 @@
#include "rte_bus_vdev.h"
#include "vdev_logs.h"
+#include "vdev_private.h"
#define VDEV_MP_KEY "bus_vdev_mp"
@@ -40,7 +41,7 @@ static struct vdev_device_list vdev_device_list =
static rte_spinlock_recursive_t vdev_device_list_lock =
RTE_SPINLOCK_RECURSIVE_INITIALIZER;
-struct vdev_driver_list vdev_driver_list =
+static struct vdev_driver_list vdev_driver_list =
TAILQ_HEAD_INITIALIZER(vdev_driver_list);
struct vdev_custom_scan {
@@ -149,10 +150,9 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev)
if (vdev_parse(name, &driver))
return -1;
- dev->device.driver = &driver->driver;
ret = driver->probe(dev);
- if (ret)
- dev->device.driver = NULL;
+ if (ret == 0)
+ dev->device.driver = &driver->driver;
return ret;
}
@@ -202,7 +202,9 @@ alloc_devargs(const char *name, const char *args)
}
static int
-insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev)
+insert_vdev(const char *name, const char *args,
+ struct rte_vdev_device **p_dev,
+ bool init)
{
struct rte_vdev_device *dev;
struct rte_devargs *devargs;
@@ -221,17 +223,24 @@ insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev)
goto fail;
}
+ dev->device.bus = &rte_vdev_bus;
dev->device.devargs = devargs;
dev->device.numa_node = SOCKET_ID_ANY;
dev->device.name = devargs->name;
if (find_vdev(name)) {
+ /*
+ * A vdev is expected to have only one port.
+ * So there is no reason to try probing again,
+ * even with new arguments.
+ */
ret = -EEXIST;
goto fail;
}
TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
- rte_devargs_insert(devargs);
+ if (init)
+ rte_devargs_insert(devargs);
if (p_dev)
*p_dev = dev;
@@ -248,20 +257,18 @@ int
rte_vdev_init(const char *name, const char *args)
{
struct rte_vdev_device *dev;
- struct rte_devargs *devargs;
int ret;
rte_spinlock_recursive_lock(&vdev_device_list_lock);
- ret = insert_vdev(name, args, &dev);
+ ret = insert_vdev(name, args, &dev, true);
if (ret == 0) {
ret = vdev_probe_all_drivers(dev);
if (ret) {
if (ret > 0)
VDEV_LOG(ERR, "no driver found for %s", name);
/* If fails, remove it from vdev list */
- devargs = dev->device.devargs;
TAILQ_REMOVE(&vdev_device_list, dev, next);
- rte_devargs_remove(devargs->bus->name, devargs->name);
+ rte_devargs_remove(dev->device.devargs);
free(dev);
}
}
@@ -289,7 +296,6 @@ int
rte_vdev_uninit(const char *name)
{
struct rte_vdev_device *dev;
- struct rte_devargs *devargs;
int ret;
if (name == NULL)
@@ -308,8 +314,7 @@ rte_vdev_uninit(const char *name)
goto unlock;
TAILQ_REMOVE(&vdev_device_list, dev, next);
- devargs = dev->device.devargs;
- rte_devargs_remove(devargs->bus->name, devargs->name);
+ rte_devargs_remove(dev->device.devargs);
free(dev);
unlock:
@@ -346,6 +351,7 @@ vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
const char *devname;
int num;
+ int ret;
strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
mp_resp.len_param = sizeof(*ou);
@@ -380,7 +386,10 @@ vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
break;
case VDEV_SCAN_ONE:
VDEV_LOG(INFO, "receive vdev, %s", in->name);
- if (insert_vdev(in->name, NULL, NULL) < 0)
+ ret = insert_vdev(in->name, NULL, NULL, false);
+ if (ret == -EEXIST)
+ VDEV_LOG(DEBUG, "device already exist, %s", in->name);
+ else if (ret < 0)
VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
break;
default:
@@ -419,6 +428,7 @@ vdev_scan(void)
mp_rep = &mp_reply.msgs[0];
resp = (struct vdev_param *)mp_rep->param;
VDEV_LOG(INFO, "Received %d vdevs", resp->num);
+ free(mp_reply.msgs);
} else
VDEV_LOG(ERR, "Failed to request vdev from primary");
@@ -455,6 +465,7 @@ vdev_scan(void)
continue;
}
+ dev->device.bus = &rte_vdev_bus;
dev->device.devargs = devargs;
dev->device.numa_node = SOCKET_ID_ANY;
dev->device.name = devargs->name;
@@ -480,7 +491,7 @@ vdev_probe(void)
* we call each driver probe.
*/
- if (dev->device.driver)
+ if (rte_dev_is_probed(&dev->device))
continue;
if (vdev_probe_all_drivers(dev)) {
@@ -493,9 +504,9 @@ vdev_probe(void)
return ret;
}
-static struct rte_device *
-vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
- const void *data)
+struct rte_device *
+rte_vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
{
const struct rte_vdev_device *vstart;
struct rte_vdev_device *dev;
@@ -532,10 +543,11 @@ vdev_unplug(struct rte_device *dev)
static struct rte_bus rte_vdev_bus = {
.scan = vdev_scan,
.probe = vdev_probe,
- .find_device = vdev_find_device,
+ .find_device = rte_vdev_find_device,
.plug = vdev_plug,
.unplug = vdev_unplug,
.parse = vdev_parse,
+ .dev_iterate = rte_vdev_dev_iterate,
};
RTE_REGISTER_BUS(vdev, rte_vdev_bus);
diff --git a/drivers/bus/vdev/vdev_params.c b/drivers/bus/vdev/vdev_params.c
new file mode 100644
index 00000000..6f74704d
--- /dev/null
+++ b/drivers/bus/vdev/vdev_params.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Gaëtan Rivet
+ */
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+
+#include "vdev_logs.h"
+#include "vdev_private.h"
+
+enum vdev_params {
+ RTE_VDEV_PARAM_NAME,
+ RTE_VDEV_PARAM_MAX,
+};
+
+static const char * const vdev_params_keys[] = {
+ [RTE_VDEV_PARAM_NAME] = "name",
+ [RTE_VDEV_PARAM_MAX] = NULL,
+};
+
+static int
+vdev_dev_match(const struct rte_device *dev,
+ const void *_kvlist)
+{
+ int ret;
+ const struct rte_kvargs *kvlist = _kvlist;
+ char *name;
+
+ /* cannot pass const dev->name to rte_kvargs_process() */
+ name = strdup(dev->name);
+ if (name == NULL)
+ return -1;
+ ret = rte_kvargs_process(kvlist,
+ vdev_params_keys[RTE_VDEV_PARAM_NAME],
+ rte_kvargs_strcmp, name);
+ free(name);
+ if (ret != 0)
+ return -1;
+
+ return 0;
+}
+
+void *
+rte_vdev_dev_iterate(const void *start,
+ const char *str,
+ const struct rte_dev_iterator *it __rte_unused)
+{
+ struct rte_kvargs *kvargs = NULL;
+ struct rte_device *dev;
+
+ if (str != NULL) {
+ kvargs = rte_kvargs_parse(str, vdev_params_keys);
+ if (kvargs == NULL) {
+ VDEV_LOG(ERR, "cannot parse argument list\n");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ }
+ dev = rte_vdev_find_device(start, vdev_dev_match, kvargs);
+ rte_kvargs_free(kvargs);
+ return dev;
+}
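
The vdev bus matches on a single "name" key via rte_kvargs_strcmp(), so
the same iterator selects virtual devices by exact name; continuing the
PCI iteration sketch above (device name hypothetical):

        struct rte_dev_iterator it;
        struct rte_device *dev;

        RTE_DEV_FOREACH(dev, "bus=vdev,name=net_null0", &it)
                printf("matched: %s\n", dev->name);
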
diff --git a/drivers/bus/vdev/vdev_private.h b/drivers/bus/vdev/vdev_private.h
new file mode 100644
index 00000000..ba6dc48f
--- /dev/null
+++ b/drivers/bus/vdev/vdev_private.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Gaëtan Rivet
+ */
+
+#ifndef _VDEV_PRIVATE_H_
+#define _VDEV_PRIVATE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct rte_device *
+rte_vdev_find_device(const struct rte_device *start,
+ rte_dev_cmp_t cmp,
+ const void *data);
+
+void *
+rte_vdev_dev_iterate(const void *start,
+ const char *str,
+ const struct rte_dev_iterator *it);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VDEV_PRIVATE_H_ */
diff --git a/drivers/bus/vmbus/Makefile b/drivers/bus/vmbus/Makefile
index deee9dd1..e54c557c 100644
--- a/drivers/bus/vmbus/Makefile
+++ b/drivers/bus/vmbus/Makefile
@@ -3,7 +3,7 @@
include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_bus_vmbus.a
-LIBABIVER := 1
+LIBABIVER := 2
EXPORT_MAP := rte_bus_vmbus_version.map
CFLAGS += -I$(SRCDIR)
diff --git a/drivers/bus/vmbus/linux/vmbus_bus.c b/drivers/bus/vmbus/linux/vmbus_bus.c
index 52d6a3c0..a4755a38 100644
--- a/drivers/bus/vmbus/linux/vmbus_bus.c
+++ b/drivers/bus/vmbus/linux/vmbus_bus.c
@@ -229,6 +229,7 @@ vmbus_scan_one(const char *name)
if (dev == NULL)
return -1;
+ dev->device.bus = &rte_vmbus_bus.bus;
dev->device.name = strdup(name);
if (!dev->device.name)
goto error;
@@ -276,6 +277,8 @@ vmbus_scan_one(const char *name)
dev->device.numa_node = SOCKET_ID_ANY;
}
+ dev->device.devargs = vmbus_devargs_lookup(dev);
+
/* device is valid, add in list (sorted) */
VMBUS_LOG(DEBUG, "Adding vmbus device %s", name);
diff --git a/drivers/bus/vmbus/meson.build b/drivers/bus/vmbus/meson.build
index 18daabec..0e4d058e 100644
--- a/drivers/bus/vmbus/meson.build
+++ b/drivers/bus/vmbus/meson.build
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
+version = 2
+
allow_experimental_apis = true
install_headers('rte_bus_vmbus.h','rte_vmbus_reg.h')
diff --git a/drivers/bus/vmbus/private.h b/drivers/bus/vmbus/private.h
index 9964fc42..211127dd 100644
--- a/drivers/bus/vmbus/private.h
+++ b/drivers/bus/vmbus/private.h
@@ -10,11 +10,14 @@
#include <sys/uio.h>
#include <rte_log.h>
#include <rte_vmbus_reg.h>
+#include <rte_bus_vmbus.h>
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
extern int vmbus_logtype_bus;
#define VMBUS_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, vmbus_logtype_bus, "%s(): " fmt "\n", \
@@ -66,6 +69,9 @@ struct vmbus_channel {
#define VMBUS_MAX_CHANNELS 64
+struct rte_devargs *
+vmbus_devargs_lookup(struct rte_vmbus_device *dev);
+
int vmbus_chan_create(const struct rte_vmbus_device *device,
uint16_t relid, uint16_t subid, uint8_t monitor_id,
struct vmbus_channel **new_chan);
diff --git a/drivers/bus/vmbus/rte_bus_vmbus.h b/drivers/bus/vmbus/rte_bus_vmbus.h
index 4a2c1f6f..2839fef5 100644
--- a/drivers/bus/vmbus/rte_bus_vmbus.h
+++ b/drivers/bus/vmbus/rte_bus_vmbus.h
@@ -365,6 +365,21 @@ void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan);
/**
+ * Set the host monitor latency hint
+ *
+ * @param dev
+ * VMBUS device
+ * @param chan
+ * Pointer to vmbus_channel structure.
+ * @param latency
+ * Approximate wait period between hypervisor examinations of
+ * the trigger page (in nanoseconds).
+ */
+void rte_vmbus_set_latency(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan,
+ uint32_t latency);
+
+/**
* Register a VMBUS driver.
*
* @param driver
diff --git a/drivers/bus/vmbus/rte_bus_vmbus_version.map b/drivers/bus/vmbus/rte_bus_vmbus_version.map
index dabb9203..ae231ad3 100644
--- a/drivers/bus/vmbus/rte_bus_vmbus_version.map
+++ b/drivers/bus/vmbus/rte_bus_vmbus_version.map
@@ -27,3 +27,10 @@ DPDK_18.08 {
local: *;
};
+
+DPDK_18.11 {
+ global:
+
+ rte_vmbus_set_latency;
+
+} DPDK_18.08;
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index cc5f3e83..bd14c066 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -60,6 +60,32 @@ vmbus_set_event(const struct rte_vmbus_device *dev,
}
/*
+ * Set the wait period between hypervisor examinations of the trigger page.
+ */
+void
+rte_vmbus_set_latency(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan,
+ uint32_t latency)
+{
+ uint32_t trig_idx = chan->monitor_id / VMBUS_MONTRIG_LEN;
+ uint32_t trig_offs = chan->monitor_id % VMBUS_MONTRIG_LEN;
+
+ if (latency >= UINT16_MAX * 100) {
+ VMBUS_LOG(ERR, "invalid latency value %u", latency);
+ return;
+ }
+
+ if (trig_idx >= VMBUS_MONTRIGS_MAX) {
+ VMBUS_LOG(ERR, "invalid monitor trigger %u",
+ trig_idx);
+ return;
+ }
+
+ /* Host value is expressed in 100 nanosecond units */
+ dev->monitor_page->lat[trig_idx][trig_offs] = latency / 100;
+}
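
The latency is stored in the monitor page in 100 ns units, so requests
below 100 ns round down to zero and anything at or above
UINT16_MAX * 100 ns is rejected. A usage sketch from a driver that
already holds dev and chan (helper name hypothetical):

#include <rte_bus_vmbus.h>

static void relax_host_polling(const struct rte_vmbus_device *dev,
                               const struct vmbus_channel *chan)
{
        /* 50 us in nanoseconds; stored host-side as 500 x 100 ns units */
        rte_vmbus_set_latency(dev, chan, 50 * 1000);
}
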
+
+/*
* Notify host that there are data pending on our TX bufring.
*
* Since this in userspace, rely on the monitor page.
diff --git a/drivers/bus/vmbus/vmbus_common.c b/drivers/bus/vmbus/vmbus_common.c
index c7165ad5..48a219f7 100644
--- a/drivers/bus/vmbus/vmbus_common.c
+++ b/drivers/bus/vmbus/vmbus_common.c
@@ -85,7 +85,6 @@ vmbus_match(const struct rte_vmbus_driver *dr,
return false;
}
-
/*
* If device ID match, call the devinit() function of the driver.
*/
@@ -112,7 +111,6 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
/* reference driver structure */
dev->driver = dr;
- dev->device.driver = &dr->driver;
if (dev->device.numa_node < 0) {
VMBUS_LOG(WARNING, " Invalid NUMA socket, default to 0");
@@ -125,6 +123,8 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
if (ret) {
dev->driver = NULL;
rte_vmbus_unmap_device(dev);
+ } else {
+ dev->device.driver = &dr->driver;
}
return ret;
@@ -143,7 +143,7 @@ vmbus_probe_all_drivers(struct rte_vmbus_device *dev)
int rc;
/* Check if a driver is already loaded */
- if (dev->driver != NULL) {
+ if (rte_dev_is_probed(&dev->device)) {
VMBUS_LOG(DEBUG, "VMBUS driver already loaded");
return 0;
}
@@ -204,6 +204,27 @@ vmbus_parse(const char *name, void *addr)
return ret;
}
+/*
+ * scan for matching device args on command line
+ * example:
+ * -w 'vmbus:635a7ae3-091e-4410-ad59-667c4f8c04c3,latency=20'
+ */
+struct rte_devargs *
+vmbus_devargs_lookup(struct rte_vmbus_device *dev)
+{
+ struct rte_devargs *devargs;
+ rte_uuid_t addr;
+
+ RTE_EAL_DEVARGS_FOREACH("vmbus", devargs) {
+ vmbus_parse(devargs->name, &addr);
+
+ if (rte_uuid_compare(dev->device_id, addr) == 0)
+ return devargs;
+ }
+ return NULL;
+
+}
+
/* register vmbus driver */
void
rte_vmbus_register(struct rte_vmbus_driver *driver)