author     Luca Boccassi <luca.boccassi@gmail.com>   2018-08-14 18:52:30 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>   2018-08-14 18:53:17 +0100
commit     b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree       83114aac64286fe616506c0b3dfaec2ab86ef835 /drivers/bus/fslmc
parent     ca33590b6af032bff57d9cc70455660466a654b2 (diff)
New upstream version 18.08 (upstream/18.08)
Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/bus/fslmc')
22 files changed, 1460 insertions, 386 deletions
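
Note on the logging rework in this import: the static FSLMC_VFIO_LOG/PMD_*_LOG macros are replaced by a dynamic log type registered as "bus.fslmc" (see the RTE_INIT(fslmc_init_log) constructor in fslmc_bus.c and the DPAA2_BUS_* wrappers in fslmc_logs.h). The sketch below only illustrates that general pattern; the component name, macro name and function are hypothetical placeholders, not part of the commit.

```c
#include <rte_common.h>
#include <rte_log.h>

/* Hypothetical dynamic log type, registered once at constructor time,
 * mirroring the fslmc_init_log constructor added in this change.
 */
static int my_logtype;

RTE_INIT(my_component_init_log)
{
	my_logtype = rte_log_register("bus.myexample"); /* placeholder name */
	if (my_logtype >= 0)
		rte_log_set_level(my_logtype, RTE_LOG_NOTICE);
}

/* Convenience wrapper in the style of DPAA2_BUS_ERR/INFO/DEBUG */
#define MY_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, my_logtype, "myexample: " fmt "\n", ##args)

static void example_usage(void)
{
	/* Emitted only when the "bus.myexample" log level is high enough,
	 * e.g. after raising it through the EAL --log-level option.
	 */
	MY_LOG(INFO, "scan completed, %d devices", 4);
}
```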
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile index de237f0f..515d0f53 100644 --- a/drivers/bus/fslmc/Makefile +++ b/drivers/bus/fslmc/Makefile @@ -9,23 +9,13 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_bus_fslmc.a -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y) -CONFIG_RTE_LIBRTE_FSLMC_BUS = $(CONFIG_RTE_LIBRTE_DPAA2_PMD) -endif - CFLAGS += -DALLOW_EXPERIMENTAL_API -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include -CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev @@ -42,11 +32,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \ mc/dpmng.c \ - mc/dpbp.c \ - mc/dpio.c \ - mc/mc_sys.c \ + mc/dpbp.c \ + mc/dpio.c \ + mc/mc_sys.c \ mc/dpcon.c \ - mc/dpci.c + mc/dpci.c \ + mc/dpdmai.c SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c index 5ee0beb8..d2900edc 100644 --- a/drivers/bus/fslmc/fslmc_bus.c +++ b/drivers/bus/fslmc/fslmc_bus.c @@ -18,11 +18,12 @@ #include <rte_fslmc.h> #include <fslmc_vfio.h> +#include "fslmc_logs.h" -#define FSLMC_BUS_LOG(level, fmt, args...) \ - RTE_LOG(level, EAL, fmt "\n", ##args) +int dpaa2_logtype_bus; #define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups" +#define FSLMC_BUS_NAME fslmc struct rte_fslmc_bus rte_fslmc_bus; uint8_t dpaa2_virt_mode; @@ -93,6 +94,41 @@ insert_in_device_list(struct rte_dpaa2_device *newdev) TAILQ_INSERT_TAIL(&rte_fslmc_bus.device_list, newdev, next); } +static struct rte_devargs * +fslmc_devargs_lookup(struct rte_dpaa2_device *dev) +{ + struct rte_devargs *devargs; + char dev_name[32]; + + RTE_EAL_DEVARGS_FOREACH("fslmc", devargs) { + devargs->bus->parse(devargs->name, &dev_name); + if (strcmp(dev_name, dev->device.name) == 0) { + DPAA2_BUS_INFO("**Devargs matched %s", dev_name); + return devargs; + } + } + return NULL; +} + +static void +dump_device_list(void) +{ + struct rte_dpaa2_device *dev; + uint32_t global_log_level; + int local_log_level; + + /* Only if the log level has been set to Debugging, print list */ + global_log_level = rte_log_get_global_level(); + local_log_level = rte_log_get_level(dpaa2_logtype_bus); + if (global_log_level == RTE_LOG_DEBUG || + local_log_level == RTE_LOG_DEBUG) { + DPAA2_BUS_LOG(DEBUG, "List of devices scanned on bus:"); + TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) { + DPAA2_BUS_LOG(DEBUG, "\t\t%s", dev->device.name); + } + } +} + static int scan_one_fslmc_device(char *dev_name) { @@ -109,7 +145,7 @@ scan_one_fslmc_device(char *dev_name) /* Creating a temporary copy to perform cut-parse over string */ dup_dev_name = strdup(dev_name); if (!dup_dev_name) { - FSLMC_BUS_LOG(ERR, "Out of memory."); + DPAA2_BUS_ERR("Unable to allocate device name memory"); return -ENOMEM; } @@ -120,7 +156,7 @@ scan_one_fslmc_device(char *dev_name) */ dev = calloc(1, sizeof(struct rte_dpaa2_device)); if (!dev) { - FSLMC_BUS_LOG(ERR, "Out of memory."); + DPAA2_BUS_ERR("Unable to allocate device object"); free(dup_dev_name); return -ENOMEM; } @@ -128,7 +164,7 @@ scan_one_fslmc_device(char *dev_name) /* Parse the device name and ID */ t_ptr = 
strtok(dup_dev_name, "."); if (!t_ptr) { - FSLMC_BUS_LOG(ERR, "Incorrect device string observed."); + DPAA2_BUS_ERR("Incorrect device name observed"); goto cleanup; } if (!strncmp("dpni", t_ptr, 4)) @@ -145,6 +181,8 @@ scan_one_fslmc_device(char *dev_name) dev->dev_type = DPAA2_CI; else if (!strncmp("dpmcp", t_ptr, 5)) dev->dev_type = DPAA2_MPORTAL; + else if (!strncmp("dpdmai", t_ptr, 6)) + dev->dev_type = DPAA2_QDMA; else dev->dev_type = DPAA2_UNKNOWN; @@ -153,17 +191,17 @@ scan_one_fslmc_device(char *dev_name) t_ptr = strtok(NULL, "."); if (!t_ptr) { - FSLMC_BUS_LOG(ERR, "Incorrect device string observed (%s).", - t_ptr); + DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr); goto cleanup; } sscanf(t_ptr, "%hu", &dev->object_id); dev->device.name = strdup(dev_name); if (!dev->device.name) { - FSLMC_BUS_LOG(ERR, "Out of memory."); + DPAA2_BUS_ERR("Unable to clone device name. Out of memory"); goto cleanup; } + dev->device.devargs = fslmc_devargs_lookup(dev); /* Add device in the fslmc device list */ insert_in_device_list(dev); @@ -182,6 +220,54 @@ cleanup: } static int +rte_fslmc_parse(const char *name, void *addr) +{ + uint16_t dev_id; + char *t_ptr; + char *sep = strchr(name, ':'); + + if (strncmp(name, RTE_STR(FSLMC_BUS_NAME), + strlen(RTE_STR(FSLMC_BUS_NAME)))) { + return -EINVAL; + } + + if (!sep) { + DPAA2_BUS_ERR("Incorrect device name observed"); + return -EINVAL; + } + + t_ptr = (char *)(sep + 1); + + if (strncmp("dpni", t_ptr, 4) && + strncmp("dpseci", t_ptr, 6) && + strncmp("dpcon", t_ptr, 5) && + strncmp("dpbp", t_ptr, 4) && + strncmp("dpio", t_ptr, 4) && + strncmp("dpci", t_ptr, 4) && + strncmp("dpmcp", t_ptr, 5) && + strncmp("dpdmai", t_ptr, 6)) { + DPAA2_BUS_ERR("Unknown or unsupported device"); + return -EINVAL; + } + + t_ptr = strchr(name, '.'); + if (!t_ptr) { + DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr); + return -EINVAL; + } + + t_ptr = (char *)(t_ptr + 1); + if (sscanf(t_ptr, "%hu", &dev_id) <= 0) { + DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr); + return -EINVAL; + } + + if (addr) + strcpy(addr, (char *)(sep + 1)); + return 0; +} + +static int rte_fslmc_scan(void) { int ret; @@ -193,8 +279,7 @@ rte_fslmc_scan(void) int groupid; if (process_once) { - FSLMC_BUS_LOG(DEBUG, - "Fslmc bus already scanned. Not rescanning"); + DPAA2_BUS_DEBUG("Fslmc bus already scanned. Not rescanning"); return 0; } process_once = 1; @@ -208,7 +293,7 @@ rte_fslmc_scan(void) groupid); dir = opendir(fslmc_dirpath); if (!dir) { - FSLMC_BUS_LOG(ERR, "Unable to open VFIO group dir."); + DPAA2_BUS_ERR("Unable to open VFIO group directory"); goto scan_fail; } @@ -224,9 +309,12 @@ rte_fslmc_scan(void) device_count += 1; } - FSLMC_BUS_LOG(INFO, "fslmc: Bus scan completed"); - closedir(dir); + + DPAA2_BUS_INFO("FSLMC Bus scan completed"); + /* If debugging is enabled, device list is dumped to log output */ + dump_device_list(); + return 0; scan_fail_cleanup: @@ -235,7 +323,7 @@ scan_fail_cleanup: /* Remove all devices in the list */ cleanup_fslmc_device_list(); scan_fail: - FSLMC_BUS_LOG(DEBUG, "FSLMC Bus Not Available. Skipping."); + DPAA2_BUS_DEBUG("FSLMC Bus Not Available. 
Skipping"); /* Irrespective of failure, scan only return success */ return 0; } @@ -254,6 +342,8 @@ static int rte_fslmc_probe(void) { int ret = 0; + int probe_all; + struct rte_dpaa2_device *dev; struct rte_dpaa2_driver *drv; @@ -262,16 +352,29 @@ rte_fslmc_probe(void) ret = fslmc_vfio_setup_group(); if (ret) { - FSLMC_BUS_LOG(ERR, "Unable to setup VFIO %d", ret); + DPAA2_BUS_ERR("Unable to setup VFIO %d", ret); + return 0; + } + + /* Map existing segments as well as, in case of hotpluggable memory, + * install callback handler. + */ + ret = rte_fslmc_vfio_dmamap(); + if (ret) { + DPAA2_BUS_ERR("Unable to DMA map existing VAs: (%d)", ret); + /* Not continuing ahead */ + DPAA2_BUS_ERR("FSLMC VFIO Mapping failed"); return 0; } ret = fslmc_vfio_process_group(); if (ret) { - FSLMC_BUS_LOG(ERR, "Unable to setup devices %d", ret); + DPAA2_BUS_ERR("Unable to setup devices %d", ret); return 0; } + probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST; + TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) { TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) { ret = rte_fslmc_match(drv, dev); @@ -281,9 +384,21 @@ rte_fslmc_probe(void) if (!drv->probe) continue; - ret = drv->probe(drv, dev); - if (ret) - FSLMC_BUS_LOG(ERR, "Unable to probe.\n"); + if (dev->device.devargs && + dev->device.devargs->policy == RTE_DEV_BLACKLISTED) { + DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping", + dev->device.name); + continue; + } + + if (probe_all || + (dev->device.devargs && + dev->device.devargs->policy == + RTE_DEV_WHITELISTED)) { + ret = drv->probe(drv, dev); + if (ret) + DPAA2_BUS_ERR("Unable to probe"); + } break; } } @@ -298,16 +413,19 @@ static struct rte_device * rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, const void *data) { + const struct rte_dpaa2_device *dstart; struct rte_dpaa2_device *dev; - TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) { - if (start && &dev->device == start) { - start = NULL; /* starting point found */ - continue; - } - + if (start != NULL) { + dstart = RTE_DEV_TO_FSLMC_CONST(start); + dev = TAILQ_NEXT(dstart, next); + } else { + dev = TAILQ_FIRST(&rte_fslmc_bus.device_list); + } + while (dev != NULL) { if (cmp(&dev->device, data) == 0) return &dev->device; + dev = TAILQ_NEXT(dev, next); } return NULL; @@ -390,6 +508,7 @@ struct rte_fslmc_bus rte_fslmc_bus = { .bus = { .scan = rte_fslmc_scan, .probe = rte_fslmc_probe, + .parse = rte_fslmc_parse, .find_device = rte_fslmc_find_device, .get_iommu_class = rte_dpaa2_get_iommu_class, }, @@ -398,4 +517,12 @@ struct rte_fslmc_bus rte_fslmc_bus = { .device_count = {0}, }; -RTE_REGISTER_BUS(fslmc, rte_fslmc_bus.bus); +RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus); + +RTE_INIT(fslmc_init_log) +{ + /* Bus level logs */ + dpaa2_logtype_bus = rte_log_register("bus.fslmc"); + if (dpaa2_logtype_bus >= 0) + rte_log_set_level(dpaa2_logtype_bus, RTE_LOG_NOTICE); +} diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h index d87b6386..dd74cb7d 100644 --- a/drivers/bus/fslmc/fslmc_logs.h +++ b/drivers/bus/fslmc/fslmc_logs.h @@ -7,44 +7,35 @@ #ifndef _FSLMC_LOGS_H_ #define _FSLMC_LOGS_H_ -#define PMD_INIT_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -#else -#define PMD_INIT_FUNC_TRACE() do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX -#define PMD_RX_LOG(level, fmt, args...) 
\ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX -#define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE -#define PMD_TX_FREE_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER -#define PMD_DRV_LOG_RAW(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -#else -#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -#endif - -#define PMD_DRV_LOG(level, fmt, args...) \ - PMD_DRV_LOG_RAW(level, fmt "\n", ## args) +extern int dpaa2_logtype_bus; + +#define DPAA2_BUS_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, "fslmc: " fmt "\n", \ + ##args) + +/* Debug logs are with Function names */ +#define DPAA2_BUS_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_bus, "fslmc: %s(): " fmt "\n", \ + __func__, ##args) + +#define BUS_INIT_FUNC_TRACE() DPAA2_BUS_DEBUG(" >>") + +#define DPAA2_BUS_INFO(fmt, args...) \ + DPAA2_BUS_LOG(INFO, fmt, ## args) +#define DPAA2_BUS_ERR(fmt, args...) \ + DPAA2_BUS_LOG(ERR, fmt, ## args) +#define DPAA2_BUS_WARN(fmt, args...) \ + DPAA2_BUS_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_BUS_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_BUS_DP_DEBUG(fmt, args...) \ + DPAA2_BUS_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_BUS_DP_INFO(fmt, args...) \ + DPAA2_BUS_DP_LOG(INFO, fmt, ## args) +#define DPAA2_BUS_DP_WARN(fmt, args...) \ + DPAA2_BUS_DP_LOG(WARNING, fmt, ## args) #endif /* _FSLMC_LOGS_H_ */ diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c index 1241295b..4c2cd2a8 100644 --- a/drivers/bus/fslmc/fslmc_vfio.c +++ b/drivers/bus/fslmc/fslmc_vfio.c @@ -30,17 +30,16 @@ #include <rte_kvargs.h> #include <rte_dev.h> #include <rte_bus.h> +#include <rte_eal_memconfig.h> #include "rte_fslmc.h" #include "fslmc_vfio.h" +#include "fslmc_logs.h" #include <mc/fsl_dpmng.h> #include "portal/dpaa2_hw_pvt.h" #include "portal/dpaa2_hw_dpio.h" -#define FSLMC_VFIO_LOG(level, fmt, args...) \ - RTE_LOG(level, EAL, fmt "\n", ##args) - /** Pathname of FSL-MC devices directory. 
*/ #define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices" @@ -53,7 +52,6 @@ static int container_device_fd; static char *g_container; static uint32_t *msi_intr_vaddr; void *(*rte_mcp_ptr_list); -static int is_dma_done; static struct rte_dpaa2_object_list dpaa2_obj_list = TAILQ_HEAD_INITIALIZER(dpaa2_obj_list); @@ -76,33 +74,32 @@ fslmc_get_container_group(int *groupid) if (!g_container) { container = getenv("DPRC"); if (container == NULL) { - RTE_LOG(WARNING, EAL, "DPAA2: DPRC not available\n"); + DPAA2_BUS_DEBUG("DPAA2: DPRC not available"); return -EINVAL; } if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) { - FSLMC_VFIO_LOG(ERR, "Invalid container name: %s\n", - container); + DPAA2_BUS_ERR("Invalid container name: %s", container); return -1; } g_container = strdup(container); if (!g_container) { - FSLMC_VFIO_LOG(ERR, "Out of memory."); + DPAA2_BUS_ERR("Mem alloc failure; Container name"); return -ENOMEM; } } /* get group number */ - ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, g_container, groupid); + ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES, + g_container, groupid); if (ret <= 0) { - FSLMC_VFIO_LOG(ERR, "Unable to find %s IOMMU group", - g_container); + DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container); return -1; } - FSLMC_VFIO_LOG(DEBUG, "Container: %s has VFIO iommu group id = %d", - g_container, *groupid); + DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d", + g_container, *groupid); return 0; } @@ -113,14 +110,14 @@ vfio_connect_container(void) int fd, ret; if (vfio_container.used) { - FSLMC_VFIO_LOG(DEBUG, "No container available."); + DPAA2_BUS_DEBUG("No container available"); return -1; } /* Try connecting to vfio container if already created */ if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &vfio_container.fd)) { - FSLMC_VFIO_LOG(INFO, + DPAA2_BUS_DEBUG( "Container pre-exists with FD[0x%x] for this group", vfio_container.fd); vfio_group.container = &vfio_container; @@ -128,9 +125,9 @@ vfio_connect_container(void) } /* Opens main vfio file descriptor which represents the "container" */ - fd = vfio_get_container_fd(); + fd = rte_vfio_get_container_fd(); if (fd < 0) { - FSLMC_VFIO_LOG(ERR, "Failed to open VFIO container"); + DPAA2_BUS_ERR("Failed to open VFIO container"); return -errno; } @@ -139,19 +136,19 @@ vfio_connect_container(void) /* Connect group to container */ ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd); if (ret) { - FSLMC_VFIO_LOG(ERR, "Failed to setup group container"); + DPAA2_BUS_ERR("Failed to setup group container"); close(fd); return -errno; } ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); if (ret) { - FSLMC_VFIO_LOG(ERR, "Failed to setup VFIO iommu"); + DPAA2_BUS_ERR("Failed to setup VFIO iommu"); close(fd); return -errno; } } else { - FSLMC_VFIO_LOG(ERR, "No supported IOMMU available"); + DPAA2_BUS_ERR("No supported IOMMU available"); close(fd); return -EINVAL; } @@ -179,7 +176,7 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group) vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE | PROT_READ, MAP_SHARED, container_device_fd, 0x6030000); if (vaddr == MAP_FAILED) { - FSLMC_VFIO_LOG(ERR, "Unable to map region (errno = %d)", errno); + DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno); return -errno; } @@ -189,88 +186,195 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group) if (ret == 0) return 0; - FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA fails (errno = %d)", errno); + DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno); return -errno; } -int 
rte_fslmc_vfio_dmamap(void) +static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len); +static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len); + +static void +fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len, + void *arg __rte_unused) { + struct rte_memseg_list *msl; + struct rte_memseg *ms; + size_t cur_len = 0, map_len = 0; + uint64_t virt_addr; + rte_iova_t iova_addr; int ret; + + msl = rte_mem_virt2memseg_list(addr); + + while (cur_len < len) { + const void *va = RTE_PTR_ADD(addr, cur_len); + + ms = rte_mem_virt2memseg(va, msl); + iova_addr = ms->iova; + virt_addr = ms->addr_64; + map_len = ms->len; + + DPAA2_BUS_DEBUG("Request for %s, va=%p, " + "virt_addr=0x%" PRIx64 ", " + "iova=0x%" PRIx64 ", map_len=%zu", + type == RTE_MEM_EVENT_ALLOC ? + "alloc" : "dealloc", + va, virt_addr, iova_addr, map_len); + + if (type == RTE_MEM_EVENT_ALLOC) + ret = fslmc_map_dma(virt_addr, iova_addr, map_len); + else + ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len); + + if (ret != 0) { + DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. " + "Map=%d, addr=%p, len=%zu, err:(%d)", + type, va, map_len, ret); + return; + } + + cur_len += map_len; + } + + if (type == RTE_MEM_EVENT_ALLOC) + DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu", + addr, len); + else + DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu", + addr, len); +} + +static int +fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len) +{ struct fslmc_vfio_group *group; struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(struct vfio_iommu_type1_dma_map), .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, }; + int ret; - int i; - const struct rte_memseg *memseg; + dma_map.size = len; + dma_map.vaddr = vaddr; - if (is_dma_done) - return 0; +#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA + dma_map.iova = iovaddr; +#else + dma_map.iova = dma_map.vaddr; +#endif - memseg = rte_eal_get_physmem_layout(); - if (memseg == NULL) { - FSLMC_VFIO_LOG(ERR, "Cannot get physical layout."); - return -ENODEV; + /* SET DMA MAP for IOMMU */ + group = &vfio_group; + + if (!group->container) { + DPAA2_BUS_ERR("Container is not connected "); + return -1; } - for (i = 0; i < RTE_MAX_MEMSEG; i++) { - if (memseg[i].addr == NULL && memseg[i].len == 0) { - FSLMC_VFIO_LOG(DEBUG, "Total %d segments found.", i); - break; - } + DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"", + (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size); + ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map); + if (ret) { + DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)", + errno); + return -1; + } - dma_map.size = memseg[i].len; - dma_map.vaddr = memseg[i].addr_64; -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - if (rte_eal_iova_mode() == RTE_IOVA_VA) - dma_map.iova = dma_map.vaddr; - else - dma_map.iova = memseg[i].iova; -#else - dma_map.iova = dma_map.vaddr; -#endif + return 0; +} - /* SET DMA MAP for IOMMU */ - group = &vfio_group; +static int +fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len) +{ + struct fslmc_vfio_group *group; + struct vfio_iommu_type1_dma_unmap dma_unmap = { + .argsz = sizeof(struct vfio_iommu_type1_dma_unmap), + .flags = 0, + }; + int ret; - if (!group->container) { - FSLMC_VFIO_LOG(ERR, "Container is not connected "); - return -1; - } + dma_unmap.size = len; + dma_unmap.iova = vaddr; - FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX", - dma_map.vaddr); - FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX", dma_map.size); - ret = 
ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, - &dma_map); - if (ret) { - FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API(errno = %d)", - errno); - return ret; - } + /* SET DMA MAP for IOMMU */ + group = &vfio_group; + + if (!group->container) { + DPAA2_BUS_ERR("Container is not connected "); + return -1; } - /* Verifying that at least single segment is available */ - if (i <= 0) { - FSLMC_VFIO_LOG(ERR, "No Segments found for VFIO Mapping"); + DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"", + (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size); + ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap); + if (ret) { + DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)", + errno); return -1; } + return 0; +} + +static int +fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused, + const struct rte_memseg *ms, void *arg) +{ + int *n_segs = arg; + int ret; + + ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len); + if (ret) + DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)", + ms->addr, ms->len); + else + (*n_segs)++; + + return ret; +} + +int rte_fslmc_vfio_dmamap(void) +{ + int i = 0, ret; + struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock; + + /* Lock before parsing and registering callback to memory subsystem */ + rte_rwlock_read_lock(mem_lock); + + if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) { + rte_rwlock_read_unlock(mem_lock); + return -1; + } + + ret = rte_mem_event_callback_register("fslmc_memevent_clb", + fslmc_memevent_cb, NULL); + if (ret && rte_errno == ENOTSUP) + DPAA2_BUS_DEBUG("Memory event callbacks not supported"); + else if (ret) + DPAA2_BUS_DEBUG("Unable to install memory handler"); + else + DPAA2_BUS_DEBUG("Installed memory callback handler"); + + DPAA2_BUS_DEBUG("Total %d segments found.", i); + /* TODO - This is a W.A. as VFIO currently does not add the mapping of * the interrupt region to SMMU. This should be removed once the * support is added in the Kernel. */ - vfio_map_irq_region(group); + vfio_map_irq_region(&vfio_group); - is_dma_done = 1; + /* Existing segments have been mapped and memory callback for hotplug + * has been installed. 
+ */ + rte_rwlock_read_unlock(mem_lock); return 0; } static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj) { - int64_t v_addr = (int64_t)MAP_FAILED; + intptr_t v_addr = (intptr_t)MAP_FAILED; int32_t ret, mc_fd; struct vfio_device_info d_info = { .argsz = sizeof(d_info) }; @@ -279,29 +383,26 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj) /* getting the mcp object's fd*/ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj); if (mc_fd < 0) { - FSLMC_VFIO_LOG(ERR, "error in VFIO get dev %s fd from group %d", - mcp_obj, group->fd); + DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d", + mcp_obj, group->fd); return v_addr; } /* getting device info*/ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info); if (ret < 0) { - FSLMC_VFIO_LOG(ERR, "error in VFIO getting DEVICE_INFO"); + DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO"); goto MC_FAILURE; } /* getting device region info*/ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); if (ret < 0) { - FSLMC_VFIO_LOG(ERR, "error in VFIO getting REGION_INFO"); + DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO"); goto MC_FAILURE; } - FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx", - reg_info.offset, reg_info.size); - - v_addr = (uint64_t)mmap(NULL, reg_info.size, + v_addr = (size_t)mmap(NULL, reg_info.size, PROT_WRITE | PROT_READ, MAP_SHARED, mc_fd, reg_info.offset); @@ -334,8 +435,8 @@ int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index) ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) { - RTE_LOG(ERR, EAL, "Error:dpaa2 SET IRQs fd=%d, err = %d(%s)\n", - intr_handle->fd, errno, strerror(errno)); + DPAA2_BUS_ERR("Error:dpaa2 SET IRQs fd=%d, err = %d(%s)", + intr_handle->fd, errno, strerror(errno)); return ret; } @@ -359,8 +460,8 @@ int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index) ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) - RTE_LOG(ERR, EAL, - "Error disabling dpaa2 interrupts for fd %d\n", + DPAA2_BUS_ERR( + "Error disabling dpaa2 interrupts for fd %d", intr_handle->fd); return ret; @@ -383,9 +484,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); if (ret < 0) { - FSLMC_VFIO_LOG(ERR, - "cannot get IRQ(%d) info, error %i (%s)", - i, errno, strerror(errno)); + DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)", + i, errno, strerror(errno)); return -1; } @@ -399,9 +499,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, /* set up an eventfd for interrupts */ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); if (fd < 0) { - FSLMC_VFIO_LOG(ERR, - "cannot set up eventfd, error %i (%s)\n", - errno, strerror(errno)); + DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)", + errno, strerror(errno)); return -1; } @@ -430,13 +529,14 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev) dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, dev->device.name); if (dev_fd <= 0) { - FSLMC_VFIO_LOG(ERR, "Unable to obtain device FD for device:%s", - dev->device.name); + DPAA2_BUS_ERR("Unable to obtain device FD for device:%s", + dev->device.name); return -1; } if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) { - FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail"); + DPAA2_BUS_ERR("Unable to obtain information for device:%s", + dev->device.name); return -1; } @@ -461,61 +561,74 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev) break; } - 
FSLMC_VFIO_LOG(DEBUG, "Device (%s) abstracted from VFIO", - dev->device.name); + DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO", + dev->device.name); return 0; } static int fslmc_process_mcp(struct rte_dpaa2_device *dev) { - int64_t v_addr; - char *dev_name; + int ret; + intptr_t v_addr; + char *dev_name = NULL; struct fsl_mc_io dpmng = {0}; struct mc_version mc_ver_info = {0}; rte_mcp_ptr_list = malloc(sizeof(void *) * 1); if (!rte_mcp_ptr_list) { - FSLMC_VFIO_LOG(ERR, "Out of memory"); - return -ENOMEM; + DPAA2_BUS_ERR("Unable to allocate MC portal memory"); + ret = -ENOMEM; + goto cleanup; } dev_name = strdup(dev->device.name); if (!dev_name) { - FSLMC_VFIO_LOG(ERR, "Out of memory."); - free(rte_mcp_ptr_list); - rte_mcp_ptr_list = NULL; - return -ENOMEM; + DPAA2_BUS_ERR("Unable to allocate MC device name memory"); + ret = -ENOMEM; + goto cleanup; } v_addr = vfio_map_mcp_obj(&vfio_group, dev_name); - if (v_addr == (int64_t)MAP_FAILED) { - FSLMC_VFIO_LOG(ERR, "Error mapping region (errno = %d)", - errno); - free(rte_mcp_ptr_list); - rte_mcp_ptr_list = NULL; - return -1; + if (v_addr == (intptr_t)MAP_FAILED) { + DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno); + ret = -1; + goto cleanup; } /* check the MC version compatibility */ dpmng.regs = (void *)v_addr; - if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) - RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n"); + if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) { + DPAA2_BUS_ERR("Unable to obtain MC version"); + ret = -1; + goto cleanup; + } if ((mc_ver_info.major != MC_VER_MAJOR) || (mc_ver_info.minor < MC_VER_MINOR)) { - RTE_LOG(ERR, PMD, "DPAA2 MC version not compatible!" - " Expected %d.%d.x, Detected %d.%d.%d\n", - MC_VER_MAJOR, MC_VER_MINOR, - mc_ver_info.major, mc_ver_info.minor, - mc_ver_info.revision); - free(rte_mcp_ptr_list); - rte_mcp_ptr_list = NULL; - return -1; + DPAA2_BUS_ERR("DPAA2 MC version not compatible!" + " Expected %d.%d.x, Detected %d.%d.%d", + MC_VER_MAJOR, MC_VER_MINOR, + mc_ver_info.major, mc_ver_info.minor, + mc_ver_info.revision); + ret = -1; + goto cleanup; } rte_mcp_ptr_list[0] = (void *)v_addr; + free(dev_name); return 0; + +cleanup: + if (dev_name) + free(dev_name); + + if (rte_mcp_ptr_list) { + free(rte_mcp_ptr_list); + rte_mcp_ptr_list = NULL; + } + + return ret; } int @@ -530,7 +643,7 @@ fslmc_vfio_process_group(void) if (dev->dev_type == DPAA2_MPORTAL) { ret = fslmc_process_mcp(dev); if (ret) { - FSLMC_VFIO_LOG(DEBUG, "Unable to map Portal."); + DPAA2_BUS_ERR("Unable to map MC Portal"); return -1; } if (!found_mportal) @@ -547,23 +660,19 @@ fslmc_vfio_process_group(void) /* Cannot continue if there is not even a single mportal */ if (!found_mportal) { - FSLMC_VFIO_LOG(DEBUG, - "No MC Portal device found. Not continuing."); + DPAA2_BUS_ERR("No MC Portal device found. 
Not continuing"); return -1; } TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) { - if (!dev) - break; - switch (dev->dev_type) { case DPAA2_ETH: case DPAA2_CRYPTO: + case DPAA2_QDMA: ret = fslmc_process_iodevices(dev); if (ret) { - FSLMC_VFIO_LOG(DEBUG, - "Dev (%s) init failed.", - dev->device.name); + DPAA2_BUS_DEBUG("Dev (%s) init failed", + dev->device.name); return ret; } break; @@ -576,9 +685,8 @@ fslmc_vfio_process_group(void) */ ret = fslmc_process_iodevices(dev); if (ret) { - FSLMC_VFIO_LOG(DEBUG, - "Dev (%s) init failed.", - dev->device.name); + DPAA2_BUS_DEBUG("Dev (%s) init failed", + dev->device.name); return -1; } @@ -592,8 +700,8 @@ fslmc_vfio_process_group(void) case DPAA2_UNKNOWN: default: /* Unknown - ignore */ - FSLMC_VFIO_LOG(DEBUG, "Found unknown device (%s).", - dev->device.name); + DPAA2_BUS_DEBUG("Found unknown device (%s)", + dev->device.name); TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next); free(dev); dev = NULL; @@ -622,12 +730,12 @@ fslmc_vfio_setup_group(void) * processing. */ if (vfio_group.groupid == groupid) { - FSLMC_VFIO_LOG(ERR, "groupid already exists %d", groupid); + DPAA2_BUS_ERR("groupid already exists %d", groupid); return 0; } /* Get the actual group fd */ - ret = vfio_get_group_fd(groupid); + ret = rte_vfio_get_group_fd(groupid); if (ret < 0) return ret; vfio_group.fd = ret; @@ -635,14 +743,14 @@ fslmc_vfio_setup_group(void) /* Check group viability */ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status); if (ret) { - FSLMC_VFIO_LOG(ERR, "VFIO error getting group status"); + DPAA2_BUS_ERR("VFIO error getting group status"); close(vfio_group.fd); rte_vfio_clear_group(vfio_group.fd); return ret; } if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { - FSLMC_VFIO_LOG(ERR, "VFIO group not viable"); + DPAA2_BUS_ERR("VFIO group not viable"); close(vfio_group.fd); rte_vfio_clear_group(vfio_group.fd); return -EPERM; @@ -655,7 +763,7 @@ fslmc_vfio_setup_group(void) /* Now connect this IOMMU group to given container */ ret = vfio_connect_container(); if (ret) { - FSLMC_VFIO_LOG(ERR, + DPAA2_BUS_ERR( "Error connecting container with groupid %d", groupid); close(vfio_group.fd); @@ -667,15 +775,15 @@ fslmc_vfio_setup_group(void) /* Get Device information */ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container); if (ret < 0) { - FSLMC_VFIO_LOG(ERR, "Error getting device %s fd from group %d", - g_container, vfio_group.groupid); + DPAA2_BUS_ERR("Error getting device %s fd from group %d", + g_container, vfio_group.groupid); close(vfio_group.fd); rte_vfio_clear_group(vfio_group.fd); return ret; } container_device_fd = ret; - FSLMC_VFIO_LOG(DEBUG, "VFIO Container FD is [0x%X]", - container_device_fd); + DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]", + container_device_fd); return 0; } diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h index e8fb3445..9e2c4fee 100644 --- a/drivers/bus/fslmc/fslmc_vfio.h +++ b/drivers/bus/fslmc/fslmc_vfio.h @@ -10,8 +10,6 @@ #include <rte_vfio.h> -#include "eal_vfio.h" - #define DPAA2_MC_DPNI_DEVID 7 #define DPAA2_MC_DPSECI_DEVID 3 #define DPAA2_MC_DPCON_DEVID 5 diff --git a/drivers/bus/fslmc/mc/dpdmai.c b/drivers/bus/fslmc/mc/dpdmai.c new file mode 100644 index 00000000..528889df --- /dev/null +++ b/drivers/bus/fslmc/mc/dpdmai.c @@ -0,0 +1,429 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#include <fsl_mc_sys.h> +#include <fsl_mc_cmd.h> +#include <fsl_dpdmai.h> +#include <fsl_dpdmai_cmd.h> + +/** + * dpdmai_open() - Open a control session for 
the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpdmai_id: DPDMAI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpdmai_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpdmai_id, + uint16_t *token) +{ + struct dpdmai_cmd_open *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpdmai_cmd_open *)cmd.params; + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpdmai_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE, + cmd_flags, token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_create() - Create the DPDMAI object + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: Returned object id + * + * Create the DPDMAI object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmai_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpdmai_cfg *cfg, + uint32_t *obj_id) +{ + struct dpdmai_cmd_create *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, + cmd_flags, + dprc_token); + cmd_params = (struct dpdmai_cmd_create *)cmd.params; + cmd_params->priorities[0] = cfg->priorities[0]; + cmd_params->priorities[1] = cfg->priorities[1]; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. + */ +int dpdmai_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id) +{ + struct dpdmai_cmd_destroy *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, + cmd_flags, + dprc_token); + cmd_params = (struct dpdmai_cmd_destroy *)cmd.params; + cmd_params->dpdmai_id = cpu_to_le32(object_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_is_enabled() - Check if the DPDMAI is enabled. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmai_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct dpdmai_rsp_is_enabled *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_is_enabled *)cmd.params; + *en = dpdmai_get_field(rsp_params->en, ENABLE); + + return 0; +} + +/** + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_get_attributes() - Retrieve DPDMAI attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @attr: Returned object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmai_attr *attr) +{ + struct dpdmai_rsp_get_attr *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_get_attr *)cmd.params; + attr->id = le32_to_cpu(rsp_params->id); + attr->num_of_priorities = rsp_params->num_of_priorities; + + return 0; +} + +/** + * dpdmai_set_rx_queue() - Set Rx queue configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation; use + * DPDMAI_ALL_QUEUES to configure all Rx queues + * identically. + * @cfg: Rx queue configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + const struct dpdmai_rx_queue_cfg *cfg) +{ + struct dpdmai_cmd_set_rx_queue *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpdmai_cmd_set_rx_queue *)cmd.params; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->priority = priority; + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); + cmd_params->options = cpu_to_le32(cfg->options); + dpdmai_set_field(cmd_params->dest_type, + DEST_TYPE, + cfg->dest_cfg.dest_type); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation + * @attr: Returned Rx queue attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + struct dpdmai_rx_queue_attr *attr) +{ + struct dpdmai_cmd_get_queue *cmd_params; + struct dpdmai_rsp_get_rx_queue *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params; + cmd_params->priority = priority; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_get_rx_queue *)cmd.params; + attr->user_ctx = le64_to_cpu(rsp_params->user_ctx); + attr->fqid = le32_to_cpu(rsp_params->fqid); + attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); + attr->dest_cfg.priority = le32_to_cpu(rsp_params->dest_priority); + attr->dest_cfg.dest_type = dpdmai_get_field(rsp_params->dest_type, + DEST_TYPE); + + return 0; +} + +/** + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation + * @attr: Returned Tx queue attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + struct dpdmai_tx_queue_attr *attr) +{ + struct dpdmai_cmd_get_queue *cmd_params; + struct dpdmai_rsp_get_tx_queue *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params; + cmd_params->priority = priority; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params; + attr->fqid = le32_to_cpu(rsp_params->fqid); + + return 0; +} diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai.h b/drivers/bus/fslmc/mc/fsl_dpdmai.h new file mode 100644 index 00000000..03e46ec1 --- /dev/null +++ b/drivers/bus/fslmc/mc/fsl_dpdmai.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#ifndef __FSL_DPDMAI_H +#define __FSL_DPDMAI_H + +struct fsl_mc_io; + +/* Data Path DMA Interface API + * Contains initialization APIs and runtime control APIs for DPDMAI + */ + +/* General DPDMAI macros */ + +/** + * Maximum number of Tx/Rx priorities per DPDMAI object + */ +#define DPDMAI_PRIO_NUM 2 + +/** + * All queues considered; see dpdmai_set_rx_queue() + */ +#define DPDMAI_ALL_QUEUES (uint8_t)(-1) + +int dpdmai_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpdmai_id, + uint16_t *token); + +int dpdmai_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpdmai_cfg - Structure representing DPDMAI configuration + * @priorities: Priorities for the DMA hardware processing; valid priorities are + * configured with values 
1-8; the entry following last valid entry + * should be configured with 0 + */ +struct dpdmai_cfg { + uint8_t priorities[DPDMAI_PRIO_NUM]; +}; + +int dpdmai_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpdmai_cfg *cfg, + uint32_t *obj_id); + +int dpdmai_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id); + +int dpdmai_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpdmai_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpdmai_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpdmai_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpdmai_attr - Structure representing DPDMAI attributes + * @id: DPDMAI object ID + * @num_of_priorities: number of priorities + */ +struct dpdmai_attr { + int id; + uint8_t num_of_priorities; +}; + +int dpdmai_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmai_attr *attr); + +/** + * enum dpdmai_dest - DPDMAI destination types + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode + * and does not generate FQDAN notifications; user is expected to dequeue + * from the queue based on polling or other user-defined method + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue + * from the queue only after notification is received + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON object; + * user is expected to dequeue from the DPCON channel + */ +enum dpdmai_dest { + DPDMAI_DEST_NONE = 0, + DPDMAI_DEST_DPIO = 1, + DPDMAI_DEST_DPCON = 2 +}; + +/** + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters + * @dest_type: Destination type + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type + * @priority: Priority selection within the DPIO or DPCON channel; valid values + * are 0-1 or 0-7, depending on the number of priorities in that + * channel; not relevant for 'DPDMAI_DEST_NONE' option + */ +struct dpdmai_dest_cfg { + enum dpdmai_dest dest_type; + int dest_id; + uint8_t priority; +}; + +/* DPDMAI queue modification options */ + +/** + * Select to modify the user's context associated with the queue + */ +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001 + +/** + * Select to modify the queue's destination + */ +#define DPDMAI_QUEUE_OPT_DEST 0x00000002 + +/** + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration + * @options: Flags representing the suggested modifications to the queue; + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags + * @user_ctx: User context value provided in the frame descriptor of each + * dequeued frame; + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options' + * @dest_cfg: Queue destination parameters; + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options' + */ +struct dpdmai_rx_queue_cfg { + uint32_t options; + uint64_t user_ctx; + struct dpdmai_dest_cfg dest_cfg; + +}; + +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + const struct dpdmai_rx_queue_cfg *cfg); + +/** + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues + * @user_ctx: User context value provided in the frame 
descriptor of each + * dequeued frame + * @dest_cfg: Queue destination configuration + * @fqid: Virtual FQID value to be used for dequeue operations + */ +struct dpdmai_rx_queue_attr { + uint64_t user_ctx; + struct dpdmai_dest_cfg dest_cfg; + uint32_t fqid; +}; + +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + struct dpdmai_rx_queue_attr *attr); + +/** + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues + * @fqid: Virtual FQID to be used for sending frames to DMA hardware + */ + +struct dpdmai_tx_queue_attr { + uint32_t fqid; +}; + +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t priority, + struct dpdmai_tx_queue_attr *attr); + +#endif /* __FSL_DPDMAI_H */ diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h new file mode 100644 index 00000000..618e19ea --- /dev/null +++ b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#ifndef _FSL_DPDMAI_CMD_H +#define _FSL_DPDMAI_CMD_H + +/* DPDMAI Version */ +#define DPDMAI_VER_MAJOR 3 +#define DPDMAI_VER_MINOR 2 + +/* Command versioning */ +#define DPDMAI_CMD_BASE_VERSION 1 +#define DPDMAI_CMD_ID_OFFSET 4 + +#define DPDMAI_CMD(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPDMAI_CMDID_CLOSE DPDMAI_CMD(0x800) +#define DPDMAI_CMDID_OPEN DPDMAI_CMD(0x80E) +#define DPDMAI_CMDID_CREATE DPDMAI_CMD(0x90E) +#define DPDMAI_CMDID_DESTROY DPDMAI_CMD(0x98E) +#define DPDMAI_CMDID_GET_API_VERSION DPDMAI_CMD(0xa0E) + +#define DPDMAI_CMDID_ENABLE DPDMAI_CMD(0x002) +#define DPDMAI_CMDID_DISABLE DPDMAI_CMD(0x003) +#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD(0x004) +#define DPDMAI_CMDID_RESET DPDMAI_CMD(0x005) +#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMD(0x006) + +#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD(0x1A0) +#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD(0x1A1) +#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD(0x1A2) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPDMAI_MASK(field) \ + GENMASK(DPDMAI_##field##_SHIFT + DPDMAI_##field##_SIZE - 1, \ + DPDMAI_##field##_SHIFT) +#define dpdmai_set_field(var, field, val) \ + ((var) |= (((val) << DPDMAI_##field##_SHIFT) & DPDMAI_MASK(field))) +#define dpdmai_get_field(var, field) \ + (((var) & DPDMAI_MASK(field)) >> DPDMAI_##field##_SHIFT) + +#pragma pack(push, 1) +struct dpdmai_cmd_open { + uint32_t dpdmai_id; +}; + +struct dpdmai_cmd_create { + uint8_t pad; + uint8_t priorities[2]; +}; + +struct dpdmai_cmd_destroy { + uint32_t dpdmai_id; +}; + +#define DPDMAI_ENABLE_SHIFT 0 +#define DPDMAI_ENABLE_SIZE 1 + +struct dpdmai_rsp_is_enabled { + /* only the LSB bit */ + uint8_t en; +}; + +struct dpdmai_rsp_get_attr { + uint32_t id; + uint8_t num_of_priorities; +}; + +#define DPDMAI_DEST_TYPE_SHIFT 0 +#define DPDMAI_DEST_TYPE_SIZE 4 + +struct dpdmai_cmd_set_rx_queue { + uint32_t dest_id; + uint8_t dest_priority; + uint8_t priority; + /* from LSB: dest_type:4 */ + uint8_t dest_type; + uint8_t pad; + uint64_t user_ctx; + uint32_t options; +}; + +struct dpdmai_cmd_get_queue { + uint8_t pad[5]; + uint8_t priority; +}; + +struct dpdmai_rsp_get_rx_queue { + uint32_t dest_id; + uint8_t dest_priority; + uint8_t pad1; + /* from LSB: dest_type:4 */ + uint8_t dest_type; + uint8_t pad2; + uint64_t user_ctx; + uint32_t fqid; +}; + +struct dpdmai_rsp_get_tx_queue { + uint64_t pad; + uint32_t fqid; +}; + +#pragma 
pack(pop) +#endif /* _FSL_DPDMAI_CMD_H */ diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h index a3c3e798..ac919610 100644 --- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h +++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h @@ -27,7 +27,7 @@ #define le32_to_cpu rte_le_to_cpu_32 #define le16_to_cpu rte_le_to_cpu_16 -#define BITS_PER_LONG 64 +#define BITS_PER_LONG (__SIZEOF_LONG__ * 8) #define GENMASK(h, l) \ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build new file mode 100644 index 00000000..22a56a6f --- /dev/null +++ b/drivers/bus/fslmc/meson.build @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['eventdev', 'kvargs'] +sources = files('fslmc_bus.c', + 'fslmc_vfio.c', + 'mc/dpbp.c', + 'mc/dpci.c', + 'mc/dpcon.c', + 'mc/dpdmai.c', + 'mc/dpio.c', + 'mc/dpmng.c', + 'mc/mc_sys.c', + 'portal/dpaa2_hw_dpbp.c', + 'portal/dpaa2_hw_dpci.c', + 'portal/dpaa2_hw_dpio.c', + 'qbman/qbman_portal.c', + 'qbman/qbman_debug.c') + +allow_experimental_apis = true + +includes += include_directories('mc', 'qbman/include', 'portal') +cflags += ['-D_GNU_SOURCE'] diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c index f1f14e29..39c5adf9 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c @@ -44,7 +44,7 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused, /* Allocate DPAA2 dpbp handle */ dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0); if (!dpbp_node) { - PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device"); + DPAA2_BUS_ERR("Memory allocation failed for DPBP Device"); return -1; } @@ -53,8 +53,8 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused, ret = dpbp_open(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_id, &dpbp_node->token); if (ret) { - PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d", - ret); + DPAA2_BUS_ERR("Unable to open buffer pool object: err(%d)", + ret); rte_free(dpbp_node); return -1; } @@ -62,8 +62,8 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused, /* Clean the device first */ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with" - " error code %d\n", ret); + DPAA2_BUS_ERR("Unable to reset buffer pool device. 
err(%d)", + ret); dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token); rte_free(dpbp_node); return -1; @@ -74,8 +74,6 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused, TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next); - RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpbp.%d]\n", dpbp_id); - if (!register_once) { rte_mbuf_set_platform_mempool_ops(DPAA2_MEMPOOL_OPS_NAME); register_once = 1; diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c index fb28e497..5ad0374d 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c @@ -39,13 +39,14 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, struct dpci_attr attr; struct dpci_rx_queue_cfg rx_queue_cfg; struct dpci_rx_queue_attr rx_attr; + struct dpci_tx_queue_attr tx_attr; int ret, i; /* Allocate DPAA2 dpci handle */ dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0); if (!dpci_node) { - PMD_INIT_LOG(ERR, "Memory allocation failed for DPCI Device"); - return -1; + DPAA2_BUS_ERR("Memory allocation failed for DPCI Device"); + return -ENOMEM; } /* Open the dpci object */ @@ -53,43 +54,57 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, ret = dpci_open(&dpci_node->dpci, CMD_PRI_LOW, dpci_id, &dpci_node->token); if (ret) { - PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d", - ret); - rte_free(dpci_node); - return -1; + DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret); + goto err; } /* Get the device attributes */ ret = dpci_get_attributes(&dpci_node->dpci, CMD_PRI_LOW, dpci_node->token, &attr); if (ret != 0) { - PMD_INIT_LOG(ERR, "Reading device failed with err code: %d", - ret); - rte_free(dpci_node); - return -1; + DPAA2_BUS_ERR("Reading device failed with err code: %d", ret); + goto err; } - /* Set up the Rx Queue */ - memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg)); - ret = dpci_set_rx_queue(&dpci_node->dpci, - CMD_PRI_LOW, - dpci_node->token, - 0, &rx_queue_cfg); - if (ret) { - PMD_INIT_LOG(ERR, "Setting Rx queue failed with err code: %d", - ret); - rte_free(dpci_node); - return -1; + for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) { + struct dpaa2_queue *rxq; + + memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg)); + ret = dpci_set_rx_queue(&dpci_node->dpci, + CMD_PRI_LOW, + dpci_node->token, + i, &rx_queue_cfg); + if (ret) { + DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d", + ret); + goto err; + } + + /* Allocate DQ storage for the DPCI Rx queues */ + rxq = &(dpci_node->rx_queue[i]); + rxq->q_storage = rte_malloc("dq_storage", + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!rxq->q_storage) { + DPAA2_BUS_ERR("q_storage allocation failed\n"); + ret = -ENOMEM; + goto err; + } + + memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t)); + ret = dpaa2_alloc_dq_storage(rxq->q_storage); + if (ret) { + DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n"); + goto err; + } } /* Enable the device */ ret = dpci_enable(&dpci_node->dpci, CMD_PRI_LOW, dpci_node->token); if (ret != 0) { - PMD_INIT_LOG(ERR, "Enabling device failed with err code: %d", - ret); - rte_free(dpci_node); - return -1; + DPAA2_BUS_ERR("Enabling device failed with err code: %d", ret); + goto err; } for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) { @@ -99,14 +114,22 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, dpci_node->token, i, &rx_attr); if (ret != 0) { - PMD_INIT_LOG(ERR, - "Reading device failed with err code: %d", - ret); - rte_free(dpci_node); - return -1; + DPAA2_BUS_ERR("Rx 
queue fetch failed with err code: %d", + ret); + goto err; } + dpci_node->rx_queue[i].fqid = rx_attr.fqid; - dpci_node->queue[i].fqid = rx_attr.fqid; + ret = dpci_get_tx_queue(&dpci_node->dpci, + CMD_PRI_LOW, + dpci_node->token, i, + &tx_attr); + if (ret != 0) { + DPAA2_BUS_ERR("Reading device failed with err code: %d", + ret); + goto err; + } + dpci_node->tx_queue[i].fqid = tx_attr.fqid; } dpci_node->dpci_id = dpci_id; @@ -114,9 +137,20 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next); - RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpci.%d]\n", dpci_id); - return 0; + +err: + for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) { + struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]); + + if (rxq->q_storage) { + dpaa2_free_dq_storage(rxq->q_storage); + rte_free(rxq->q_storage); + } + } + rte_free(dpci_node); + + return ret; } struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void) diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c index eefde155..99f70be1 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c @@ -101,7 +101,7 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id) snprintf(string, STRING_LEN, "dpio.%d", dpio_id); file = fopen("/proc/interrupts", "r"); if (!file) { - PMD_DRV_LOG(WARNING, "Failed to open /proc/interrupts file\n"); + DPAA2_BUS_WARN("Failed to open /proc/interrupts file"); return; } while (getline(&temp, &len, file) != -1) { @@ -112,8 +112,8 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id) } if (!token) { - PMD_DRV_LOG(WARNING, "Failed to get interrupt id for dpio.%d\n", - dpio_id); + DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d", + dpio_id); if (temp) free(temp); fclose(file); @@ -125,10 +125,10 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id) cpu_mask, token); ret = system(command); if (ret < 0) - PMD_DRV_LOG(WARNING, - "Failed to affine interrupts on respective core\n"); + DPAA2_BUS_WARN( + "Failed to affine interrupts on respective core"); else - PMD_DRV_LOG(WARNING, " %s command is executed\n", command); + DPAA2_BUS_DEBUG(" %s command is executed", command); free(temp); fclose(file); @@ -143,7 +143,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev) dpio_epoll_fd = epoll_create(1); ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0); if (ret) { - PMD_DRV_LOG(ERR, "Interrupt registeration failed\n"); + DPAA2_BUS_ERR("Interrupt registeration failed"); return -1; } @@ -166,7 +166,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev) ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev); if (ret < 0) { - PMD_DRV_LOG(ERR, "epoll_ctl failed\n"); + DPAA2_BUS_ERR("epoll_ctl failed"); return -1; } dpio_dev->epoll_fd = dpio_epoll_fd; @@ -185,28 +185,27 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev) dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io)); if (!dpio_dev->dpio) { - PMD_INIT_LOG(ERR, "Memory allocation failure\n"); + DPAA2_BUS_ERR("Memory allocation failure"); return -1; } - PMD_DRV_LOG(DEBUG, "Allocated DPIO Portal[%p]", dpio_dev->dpio); dpio_dev->dpio->regs = dpio_dev->mc_portal; if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id, &dpio_dev->token)) { - PMD_INIT_LOG(ERR, "Failed to allocate IO space\n"); + DPAA2_BUS_ERR("Failed to allocate IO space"); free(dpio_dev->dpio); return -1; } if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { - PMD_INIT_LOG(ERR, "Failed to 
reset dpio\n"); + DPAA2_BUS_ERR("Failed to reset dpio"); dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); free(dpio_dev->dpio); return -1; } if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { - PMD_INIT_LOG(ERR, "Failed to Enable dpio\n"); + DPAA2_BUS_ERR("Failed to Enable dpio"); dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); free(dpio_dev->dpio); return -1; @@ -214,7 +213,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev) if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token, &attr)) { - PMD_INIT_LOG(ERR, "DPIO Get attribute failed\n"); + DPAA2_BUS_ERR("DPIO Get attribute failed"); dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); free(dpio_dev->dpio); @@ -231,7 +230,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev) dpio_dev->sw_portal = qbman_swp_init(&p_des); if (dpio_dev->sw_portal == NULL) { - PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n"); + DPAA2_BUS_ERR("QBMan SW Portal Init failed"); dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); free(dpio_dev->dpio); return -1; @@ -249,7 +248,7 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id) if (cpu_id < 0) { cpu_id = rte_get_master_lcore(); if (cpu_id < 0) { - RTE_LOG(ERR, PMD, "\tGetting CPU Index failed\n"); + DPAA2_BUS_ERR("Getting CPU Index failed"); return -1; } } @@ -258,19 +257,19 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id) */ sdest = dpaa2_core_cluster_sdest(cpu_id); - PMD_DRV_LOG(DEBUG, "Portal= %d CPU= %u SDEST= %d", - dpio_dev->index, cpu_id, sdest); + DPAA2_BUS_DEBUG("Portal= %d CPU= %u SDEST= %d", + dpio_dev->index, cpu_id, sdest); ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token, sdest); if (ret) { - PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret); + DPAA2_BUS_ERR("%d ERROR in SDEST", ret); return -1; } #ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV if (dpaa2_dpio_intr_init(dpio_dev)) { - PMD_DRV_LOG(ERR, "Interrupt registration failed for dpio\n"); + DPAA2_BUS_ERR("Interrupt registration failed for dpio"); return -1; } #endif @@ -291,12 +290,12 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id) if (!dpio_dev) return NULL; - PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu", - dpio_dev, dpio_dev->index, syscall(SYS_gettid)); + DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu", + dpio_dev, dpio_dev->index, syscall(SYS_gettid)); ret = dpaa2_configure_stashing(dpio_dev, cpu_id); if (ret) - PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed"); + DPAA2_BUS_ERR("dpaa2_configure_stashing failed"); return dpio_dev; } @@ -314,8 +313,9 @@ dpaa2_affine_qbman_swp(void) return -1; if (dpaa2_io_portal[lcore_id].dpio_dev) { - PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared" - " between thread %lu and current %lu", + DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared" + " between thread %" PRIu64 " and current " + "%" PRIu64 "\n", dpaa2_io_portal[lcore_id].dpio_dev, dpaa2_io_portal[lcore_id].dpio_dev->index, dpaa2_io_portal[lcore_id].net_tid, @@ -326,7 +326,8 @@ dpaa2_affine_qbman_swp(void) [lcore_id].dpio_dev->ref_count); dpaa2_io_portal[lcore_id].net_tid = tid; - PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu", + DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - " + "%" PRIu64 "\n", dpaa2_io_portal[lcore_id].dpio_dev, dpaa2_io_portal[lcore_id].dpio_dev->index, tid); @@ -348,7 +349,7 @@ dpaa2_affine_qbman_swp(void) } int -dpaa2_affine_qbman_swp_sec(void) 
+dpaa2_affine_qbman_ethrx_swp(void) { unsigned int lcore_id = rte_lcore_id(); uint64_t tid = syscall(SYS_gettid); @@ -359,32 +360,36 @@ dpaa2_affine_qbman_swp_sec(void) else if (lcore_id >= RTE_MAX_LCORE) return -1; - if (dpaa2_io_portal[lcore_id].sec_dpio_dev) { - PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared" - " between thread %lu and current %lu", - dpaa2_io_portal[lcore_id].sec_dpio_dev, - dpaa2_io_portal[lcore_id].sec_dpio_dev->index, - dpaa2_io_portal[lcore_id].sec_tid, - tid); - RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev - = dpaa2_io_portal[lcore_id].sec_dpio_dev; + if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) { + DPAA2_BUS_DP_INFO( + "DPAA Portal=%p (%d) is being shared between thread" + " %" PRIu64 " and current %" PRIu64 "\n", + dpaa2_io_portal[lcore_id].ethrx_dpio_dev, + dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index, + dpaa2_io_portal[lcore_id].sec_tid, + tid); + RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev + = dpaa2_io_portal[lcore_id].ethrx_dpio_dev; rte_atomic16_inc(&dpaa2_io_portal - [lcore_id].sec_dpio_dev->ref_count); + [lcore_id].ethrx_dpio_dev->ref_count); dpaa2_io_portal[lcore_id].sec_tid = tid; - PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu", - dpaa2_io_portal[lcore_id].sec_dpio_dev, - dpaa2_io_portal[lcore_id].sec_dpio_dev->index, - tid); + DPAA2_BUS_DP_DEBUG( + "Old Portal=%p (%d) affined thread" + " - %" PRIu64 "\n", + dpaa2_io_portal[lcore_id].ethrx_dpio_dev, + dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index, + tid); return 0; } /* Populate the dpaa2_io_portal structure */ - dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp(lcore_id); + dpaa2_io_portal[lcore_id].ethrx_dpio_dev = + dpaa2_get_qbman_swp(lcore_id); - if (dpaa2_io_portal[lcore_id].sec_dpio_dev) { - RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev - = dpaa2_io_portal[lcore_id].sec_dpio_dev; + if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) { + RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev + = dpaa2_io_portal[lcore_id].ethrx_dpio_dev; dpaa2_io_portal[lcore_id].sec_tid = tid; return 0; } else { @@ -401,15 +406,14 @@ dpaa2_create_dpio_device(int vdev_fd, struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)}; if (obj_info->num_regions < NUM_DPIO_REGIONS) { - PMD_INIT_LOG(ERR, "ERROR, Not sufficient number " - "of DPIO regions.\n"); + DPAA2_BUS_ERR("Not sufficient number of DPIO regions"); return -1; } dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev), RTE_CACHE_LINE_SIZE); if (!dpio_dev) { - PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO Device\n"); + DPAA2_BUS_ERR("Memory allocation failed for DPIO Device"); return -1; } @@ -421,31 +425,31 @@ dpaa2_create_dpio_device(int vdev_fd, reg_info.index = 0; if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { - PMD_INIT_LOG(ERR, "vfio: error getting region info\n"); + DPAA2_BUS_ERR("vfio: error getting region info"); rte_free(dpio_dev); return -1; } dpio_dev->ce_size = reg_info.size; - dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size, + dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size, PROT_WRITE | PROT_READ, MAP_SHARED, vdev_fd, reg_info.offset); reg_info.index = 1; if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { - PMD_INIT_LOG(ERR, "vfio: error getting region info\n"); + DPAA2_BUS_ERR("vfio: error getting region info"); rte_free(dpio_dev); return -1; } dpio_dev->ci_size = reg_info.size; - dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size, + dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size, PROT_WRITE | PROT_READ, MAP_SHARED, vdev_fd, 
reg_info.offset); if (configure_dpio_qbman_swp(dpio_dev)) { - PMD_INIT_LOG(ERR, - "Fail to configure the dpio qbman portal for %d\n", + DPAA2_BUS_ERR( + "Fail to configure the dpio qbman portal for %d", dpio_dev->hw_id); rte_free(dpio_dev); return -1; @@ -455,8 +459,8 @@ dpaa2_create_dpio_device(int vdev_fd, dpio_dev->index = io_space_count; if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) { - PMD_INIT_LOG(ERR, "Fail to setup interrupt for %d\n", - dpio_dev->hw_id); + DPAA2_BUS_ERR("Fail to setup interrupt for %d", + dpio_dev->hw_id); rte_free(dpio_dev); } @@ -466,21 +470,20 @@ dpaa2_create_dpio_device(int vdev_fd, if (mc_get_soc_version(dpio_dev->dpio, CMD_PRI_LOW, &mc_plat_info)) { - PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n"); + DPAA2_BUS_ERR("Unable to get SoC version information"); } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) { dpaa2_core_cluster_base = 0x02; dpaa2_cluster_sz = 4; - PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected"); + DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected"); } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) { dpaa2_core_cluster_base = 0x00; dpaa2_cluster_sz = 2; - PMD_INIT_LOG(DEBUG, "\tLX2160 Platform Detected"); + DPAA2_BUS_DEBUG("LX2160 Platform Detected"); } dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000); } TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next); - RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpio.%d]\n", object_id); return 0; } diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h index c0bd8782..d593eea7 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h @@ -13,7 +13,7 @@ struct dpaa2_io_portal_t { struct dpaa2_dpio_dev *dpio_dev; - struct dpaa2_dpio_dev *sec_dpio_dev; + struct dpaa2_dpio_dev *ethrx_dpio_dev; uint64_t net_tid; uint64_t sec_tid; void *eventdev; @@ -25,8 +25,8 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io); #define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev #define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal -#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev -#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal +#define DPAA2_PER_LCORE_ETHRX_DPIO RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev +#define DPAA2_PER_LCORE_ETHRX_PORTAL DPAA2_PER_LCORE_ETHRX_DPIO->sw_portal /* Variable to store DPAA2 platform type */ extern uint32_t dpaa2_svr_family; @@ -39,7 +39,7 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id); int dpaa2_affine_qbman_swp(void); /* Affine additional DPIO portal to current crypto processing thread */ -int dpaa2_affine_qbman_swp_sec(void); +int dpaa2_affine_qbman_ethrx_swp(void); /* allocate memory for FQ - dq storage */ int diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h index d421dbf0..82075936 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h +++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h @@ -142,7 +142,8 @@ struct dpaa2_dpci_dev { uint16_t token; rte_atomic16_t in_use; uint32_t dpci_id; /*HW ID for DPCI object */ - struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES]; + struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES]; + struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES]; }; /*! 
Global MCP list */ @@ -174,7 +175,7 @@ enum qbman_fd_format { }; /*Macros to define operations on FD*/ #define DPAA2_SET_FD_ADDR(fd, addr) do { \ - (fd)->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \ + (fd)->simple.addr_lo = lower_32_bits((size_t)(addr)); \ (fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \ } while (0) #define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length) @@ -188,50 +189,55 @@ enum qbman_fd_format { ((fd)->simple.frc = (0x80000000 | (len))) #define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \ ((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16)) -#define DPAA2_SET_FD_FRC(fd, frc) ((fd)->simple.frc = frc) +#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc) #define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0) #define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16)) #define DPAA2_SET_FD_FLC(fd, addr) do { \ - (fd)->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \ + (fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \ (fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \ } while (0) #define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len))) #define DPAA2_GET_FLE_ADDR(fle) \ - (uint64_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo) + (size_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo) #define DPAA2_SET_FLE_ADDR(fle, addr) do { \ - (fle)->addr_lo = lower_32_bits((uint64_t)addr); \ - (fle)->addr_hi = upper_32_bits((uint64_t)addr); \ + (fle)->addr_lo = lower_32_bits((size_t)addr); \ + (fle)->addr_hi = upper_32_bits((uint64_t)addr); \ } while (0) #define DPAA2_GET_FLE_CTXT(fle) \ - (uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \ - (fle)->reserved[0]) + ((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0]) #define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \ - (fle)->reserved[0] = lower_32_bits((uint64_t)addr); \ - (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \ + (fle)->reserved[0] = lower_32_bits((size_t)addr); \ + (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \ } while (0) #define DPAA2_SET_FLE_OFFSET(fle, offset) \ ((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16) -#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid) +#define DPAA2_SET_FLE_LEN(fle, len) ((fle)->length = len) +#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)bpid) #define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff) -#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 31) +#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= 1 << 31) #define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000)) +#define DPAA2_SET_FLE_BMT(fle) (((fle)->fin_bpid_offset |= 0x00008000)) #define DPAA2_SET_FD_COMPOUND_FMT(fd) \ ((fd)->simple.bpid_offset |= (uint32_t)1 << 28) #define DPAA2_GET_FD_ADDR(fd) \ -((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo)) +(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo)) #define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len) #define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF)) #define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14) #define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16) +#define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc) +#define DPAA2_GET_FD_FLC(fd) \ + (((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo) +#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.bpid_offset & 0x000000FF) #define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16) 
#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29) #define DPAA2_IS_SET_FLE_SG_EXT(fle) \ (((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0) #define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \ - ((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size))) + ((struct rte_mbuf *)((size_t)(buf) - (meta_data_size))) #define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64) @@ -255,47 +261,53 @@ enum qbman_fd_format { */ #define DPAA2_EQ_RESP_ALWAYS 1 +/* Various structures representing contiguous memory maps */ +struct dpaa2_memseg { + TAILQ_ENTRY(dpaa2_memseg) next; + char *vaddr; + rte_iova_t iova; + size_t len; +}; + +TAILQ_HEAD(dpaa2_memseg_list, dpaa2_memseg); +extern struct dpaa2_memseg_list rte_dpaa2_memsegs; + #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA extern uint8_t dpaa2_virt_mode; static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused)); /* todo - this is costly, need to write a fast coversion routine */ static void *dpaa2_mem_ptov(phys_addr_t paddr) { - const struct rte_memseg *memseg; - int i; + struct dpaa2_memseg *ms; if (dpaa2_virt_mode) - return (void *)paddr; - - memseg = rte_eal_get_physmem_layout(); - - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { - if (paddr >= memseg[i].iova && - (char *)paddr < (char *)memseg[i].iova + memseg[i].len) - return (void *)(memseg[i].addr_64 - + (paddr - memseg[i].iova)); + return (void *)(size_t)paddr; + + /* Check if the address is already part of the memseg list internally + * maintained by the dpaa2 driver. + */ + TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) { + if (paddr >= ms->iova && paddr < + ms->iova + ms->len) + return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova)); } - return NULL; + + /* If not, Fallback to full memseg list searching */ + return rte_mem_iova2virt(paddr); } static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused)); static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) { const struct rte_memseg *memseg; - int i; if (dpaa2_virt_mode) return vaddr; - memseg = rte_eal_get_physmem_layout(); - - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { - if (vaddr >= memseg[i].addr_64 && - vaddr < memseg[i].addr_64 + memseg[i].len) - return memseg[i].iova - + (vaddr - memseg[i].addr_64); - } - return (phys_addr_t)(NULL); + memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL); + if (memseg) + return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr); + return (size_t)NULL; } /** @@ -306,28 +318,26 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) */ #define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova) -#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr) /** * macro to convert Virtual address to IOVA */ -#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr)) +#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr)) /** * macro to convert IOVA to Virtual address */ -#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova)) +#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova)) /** * macro to convert modify the memory containing IOVA to Virtual address */ #define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \ - {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); } + {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); } #else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */ #define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr) -#define DPAA2_OP_VADDR_TO_IOVA(op) (op) #define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr) #define DPAA2_IOVA_TO_VADDR(_iova) (_iova) #define 
DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h index 96269ed4..bb60a98f 100644 --- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h +++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h @@ -6,8 +6,6 @@ #ifndef _FSL_QBMAN_BASE_H #define _FSL_QBMAN_BASE_H -typedef uint64_t dma_addr_t; - /** * DOC: QBMan basic structures * diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c index e2217335..07145005 100644 --- a/drivers/bus/fslmc/qbman/qbman_portal.c +++ b/drivers/bus/fslmc/qbman/qbman_portal.c @@ -122,8 +122,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) p->vdq.valid_bit = QB_VALID_BIT; p->dqrr.next_idx = 0; p->dqrr.valid_bit = QB_VALID_BIT; - qman_version = p->desc.qman_version; - if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) { + if ((p->desc.qman_version & 0xFFFF0000) < QMAN_REV_4100) { p->dqrr.dqrr_size = 4; p->dqrr.reset_bug = 1; } else { @@ -553,10 +552,9 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s, /* Flush all the cacheline without load/store in between */ eqcr_pi = s->eqcr.pi; - addr_cena = (uint64_t)s->sys.addr_cena; + addr_cena = (size_t)s->sys.addr_cena; for (i = 0; i < num_enqueued; i++) { - dcbf((uint64_t *)(addr_cena + - QBMAN_CENA_SWP_EQCR(eqcr_pi & 7))); + dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7))); eqcr_pi++; eqcr_pi &= 0xF; } @@ -620,10 +618,9 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s, /* Flush all the cacheline without load/store in between */ eqcr_pi = s->eqcr.pi; - addr_cena = (uint64_t)s->sys.addr_cena; + addr_cena = (size_t)s->sys.addr_cena; for (i = 0; i < num_enqueued; i++) { - dcbf((uint64_t *)(addr_cena + - QBMAN_CENA_SWP_EQCR(eqcr_pi & 7))); + dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7))); eqcr_pi++; eqcr_pi &= 0xF; } @@ -690,7 +687,7 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, dma_addr_t storage_phys, int stash) { - d->pull.rsp_addr_virt = (uint64_t)storage; + d->pull.rsp_addr_virt = (size_t)storage; if (!storage) { d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT); @@ -749,7 +746,7 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) } d->pull.tok = s->sys.idx + 1; - s->vdq.storage = (void *)d->pull.rsp_addr_virt; + s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt; p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR); memcpy(&p[1], &cl[1], 12); diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h index 8bff0b4f..dbea22a1 100644 --- a/drivers/bus/fslmc/qbman/qbman_portal.h +++ b/drivers/bus/fslmc/qbman/qbman_portal.h @@ -7,7 +7,6 @@ #include "qbman_sys.h" #include <fsl_qbman_portal.h> -uint32_t qman_version; #define QMAN_REV_4000 0x04000000 #define QMAN_REV_4100 0x04010000 #define QMAN_REV_4101 0x04010001 diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h index 846788ea..2bd33ea5 100644 --- a/drivers/bus/fslmc/qbman/qbman_sys.h +++ b/drivers/bus/fslmc/qbman/qbman_sys.h @@ -20,6 +20,9 @@ #include "qbman_sys_decl.h" +#define CENA_WRITE_ENABLE 0 +#define CINH_WRITE_ENABLE 1 + /* Debugging assists */ static inline void __hexdump(unsigned long start, unsigned long end, unsigned long p, size_t sz, const unsigned char *c) @@ -178,7 +181,11 @@ static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s, s->addr_cena, s->idx, offset); #endif QBMAN_BUG_ON(offset & 63); +#ifdef RTE_ARCH_64 return (s->addr_cena + 
offset); +#else + return (s->addr_cinh + offset); +#endif } static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, @@ -191,11 +198,19 @@ static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, s->addr_cena, s->idx, offset, shadow); hexdump(cmd, 64); #endif +#ifdef RTE_ARCH_64 for (loop = 15; loop >= 1; loop--) __raw_writel(shadow[loop], s->addr_cena + offset + loop * 4); lwsync(); __raw_writel(shadow[0], s->addr_cena + offset); +#else + for (loop = 15; loop >= 1; loop--) + __raw_writel(shadow[loop], s->addr_cinh + + offset + loop * 4); + lwsync(); + __raw_writel(shadow[0], s->addr_cinh + offset); +#endif dcbf(s->addr_cena + offset); } @@ -224,9 +239,15 @@ static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) s->addr_cena, s->idx, offset, shadow); #endif +#ifdef RTE_ARCH_64 for (loop = 0; loop < 16; loop++) shadow[loop] = __raw_readl(s->addr_cena + offset + loop * 4); +#else + for (loop = 0; loop < 16; loop++) + shadow[loop] = __raw_readl(s->addr_cinh + offset + + loop * 4); +#endif #ifdef QBMAN_CENA_TRACE hexdump(shadow, 64); #endif @@ -313,6 +334,11 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, uint8_t dqrr_size) { uint32_t reg; +#ifdef RTE_ARCH_64 + uint8_t wn = CENA_WRITE_ENABLE; +#else + uint8_t wn = CINH_WRITE_ENABLE; +#endif s->addr_cena = d->cena_bar; s->addr_cinh = d->cinh_bar; @@ -333,10 +359,10 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, QBMAN_BUG_ON(reg); #endif if (s->eqcr_mode == qman_eqcr_vb_array) - reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1, + reg = qbman_set_swp_cfg(dqrr_size, wn, 0, 3, 2, 3, 1, 1, 1, 1, 1, 1); else - reg = qbman_set_swp_cfg(dqrr_size, 0, 1, 3, 2, 2, 1, 1, 1, 1, + reg = qbman_set_swp_cfg(dqrr_size, wn, 1, 3, 2, 2, 1, 1, 1, 1, 1, 1); qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h index f82bb18c..fa6977fe 100644 --- a/drivers/bus/fslmc/qbman/qbman_sys_decl.h +++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h @@ -15,6 +15,7 @@ /****************/ /* arch assists */ /****************/ +#if defined(RTE_ARCH_ARM64) #define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } #define lwsync() { asm volatile("dmb st" : : : "memory"); } #define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); } @@ -28,3 +29,25 @@ static inline void prefetch_for_store(void *p) { asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p)); } +#elif defined(RTE_ARCH_ARM) +#define dcbz(p) memset(p, 0, 64) +#define lwsync() { asm volatile("dmb st" : : : "memory"); } +#define dcbf(p) RTE_SET_USED(p) +#define dccivac(p) RTE_SET_USED(p) +#define prefetch_for_load(p) { asm volatile ("pld [%0]" : : "r" (p)); } +#define prefetch_for_store(p) { asm volatile ("pld [%0]" : : "r" (p)); } + +#else +#define dcbz(p) RTE_SET_USED(p) +#define lwsync() +#define dcbf(p) RTE_SET_USED(p) +#define dccivac(p) RTE_SET_USED(p) +static inline void prefetch_for_load(void *p) +{ + RTE_SET_USED(p); +} +static inline void prefetch_for_store(void *p) +{ + RTE_SET_USED(p); +} +#endif diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map index b7db0741..fe45a113 100644 --- a/drivers/bus/fslmc/rte_bus_fslmc_version.map +++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map @@ -2,7 +2,6 @@ DPDK_17.05 { global: dpaa2_affine_qbman_swp; - dpaa2_affine_qbman_swp_sec; dpaa2_alloc_dpbp_dev; dpaa2_alloc_dq_storage; 
dpaa2_free_dpbp_dev; @@ -101,3 +100,19 @@ DPDK_18.02 { rte_fslmc_get_device_count; } DPDK_17.11; + +DPDK_18.05 { + global: + + dpaa2_affine_qbman_ethrx_swp; + dpdmai_close; + dpdmai_disable; + dpdmai_enable; + dpdmai_get_attributes; + dpdmai_get_rx_queue; + dpdmai_get_tx_queue; + dpdmai_open; + dpdmai_set_rx_queue; + rte_dpaa2_free_dpci_dev; + +} DPDK_18.02; diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h index 69d0fec7..cea5b78f 100644 --- a/drivers/bus/fslmc/rte_fslmc.h +++ b/drivers/bus/fslmc/rte_fslmc.h @@ -31,6 +31,7 @@ extern "C" { #include <rte_dev.h> #include <rte_bus.h> #include <rte_tailq.h> +#include <rte_devargs.h> #include <fslmc_vfio.h> @@ -49,6 +50,9 @@ struct rte_dpaa2_driver; TAILQ_HEAD(rte_fslmc_device_list, rte_dpaa2_device); TAILQ_HEAD(rte_fslmc_driver_list, rte_dpaa2_driver); +#define RTE_DEV_TO_FSLMC_CONST(ptr) \ + container_of(ptr, const struct rte_dpaa2_device, device) + extern struct rte_fslmc_bus rte_fslmc_bus; enum rte_dpaa2_dev_type { @@ -61,6 +65,7 @@ enum rte_dpaa2_dev_type { DPAA2_IO, /**< DPIO type device */ DPAA2_CI, /**< DPCI type device */ DPAA2_MPORTAL, /**< DPMCP type device */ + DPAA2_QDMA, /**< DPDMAI type device */ /* Unknown device placeholder */ DPAA2_UNKNOWN, DPAA2_DEVTYPE_MAX, @@ -91,6 +96,7 @@ struct rte_dpaa2_device { union { struct rte_eth_dev *eth_dev; /**< ethernet device */ struct rte_cryptodev *cryptodev; /**< Crypto Device */ + struct rte_rawdev *rawdev; /**< Raw Device */ }; enum rte_dpaa2_dev_type dev_type; /**< Device Type */ uint16_t object_id; /**< DPAA2 Object ID */ @@ -167,8 +173,7 @@ void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver); /** Helper for DPAA2 device registration from driver (eth, crypto) instance */ #define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \ -RTE_INIT(dpaa2initfn_ ##nm); \ -static void dpaa2initfn_ ##nm(void) \ +RTE_INIT(dpaa2initfn_ ##nm) \ {\ (dpaa2_drv).driver.name = RTE_STR(nm);\ rte_fslmc_driver_register(&dpaa2_drv); \ @@ -197,8 +202,7 @@ uint32_t rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type); /** Helper for DPAA2 object registration */ #define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \ -RTE_INIT(dpaa2objinitfn_ ##nm); \ -static void dpaa2objinitfn_ ##nm(void) \ +RTE_INIT(dpaa2objinitfn_ ##nm) \ {\ (dpaa2_obj).name = RTE_STR(nm);\ rte_fslmc_object_register(&dpaa2_obj); \
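The fsl_mc_cmd.h hunk earlier in this patch replaces the hardcoded BITS_PER_LONG of 64 with a value derived from __SIZEOF_LONG__, so the GENMASK() macro defined right below it stays within the width of unsigned long on 32-bit targets. A minimal standalone sketch (not part of the patch; the printf harness is illustrative only) of the behaviour after the change:

/*
 * With BITS_PER_LONG hardcoded to 64 on an ABI where long is 32 bits,
 * "~0UL >> (BITS_PER_LONG - 1 - (h))" shifts a 32-bit value by more
 * than 31 positions, which is undefined behaviour.  Deriving the width
 * from __SIZEOF_LONG__ keeps the shift count in range on both ABIs.
 */
#include <stdio.h>

#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* Bits 15..8 set: prints 0xff00 for both 32- and 64-bit long. */
	printf("GENMASK(15, 8) = 0x%lx\n", GENMASK(15, 8));
	return 0;
}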
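The dpaa2_hw_dpio.c/.h hunks rename the secondary per-lcore portal to an "ethrx" portal: dpaa2_affine_qbman_swp_sec() becomes dpaa2_affine_qbman_ethrx_swp() (exported under the DPDK_18.05 node of the version map) and DPAA2_PER_LCORE_SEC_PORTAL becomes DPAA2_PER_LCORE_ETHRX_PORTAL. A hedged sketch of how an in-tree DPAA2 driver thread might pick up that portal after the rename; the worker function, its error handling and the include spellings (which assume the -I paths added in the Makefile/meson.build hunks) are illustrative assumptions, not code from this patch:

#include <rte_common.h>

#include <dpaa2_hw_pvt.h>	/* struct dpaa2_dpio_dev / qbman_swp */
#include <dpaa2_hw_dpio.h>	/* affinity helpers and per-lcore macros */

static int
dpaa2_ethrx_worker(void *arg __rte_unused)
{
	struct qbman_swp *swp;

	/* Formerly dpaa2_affine_qbman_swp_sec(); returns 0 on success. */
	if (dpaa2_affine_qbman_ethrx_swp())
		return -1;

	/* Formerly DPAA2_PER_LCORE_SEC_PORTAL. */
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;

	/* ... issue qbman pull/dequeue commands on swp ... */
	RTE_SET_USED(swp);
	return 0;
}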