Diffstat (limited to 'lib/librte_eal')
 lib/librte_eal/common/eal_common_dev.c       |  4
 lib/librte_eal/common/eal_common_errno.c     |  3
 lib/librte_eal/common/eal_common_memory.c    | 42
 lib/librte_eal/common/eal_common_options.c   |  2
 lib/librte_eal/common/hotplug_mp.c           |  7
 lib/librte_eal/common/include/rte_dev.h      | 10
 lib/librte_eal/common/include/rte_memory.h   | 41
 lib/librte_eal/common/include/rte_version.h  |  2
 lib/librte_eal/common/malloc_heap.c          | 43
 lib/librte_eal/common/rte_malloc.c           |  3
 lib/librte_eal/common/rte_service.c          |  3
 lib/librte_eal/linuxapp/eal/eal_interrupts.c | 19
 lib/librte_eal/linuxapp/eal/eal_memory.c     | 20
 lib/librte_eal/rte_eal_version.map           |  8
 14 files changed, 175 insertions(+), 32 deletions(-)
diff --git a/lib/librte_eal/common/eal_common_dev.c b/lib/librte_eal/common/eal_common_dev.c
index 62e9ed47..5759ec2d 100644
--- a/lib/librte_eal/common/eal_common_dev.c
+++ b/lib/librte_eal/common/eal_common_dev.c
@@ -186,7 +186,7 @@ err_devarg:
return ret;
}
-int __rte_experimental
+int
rte_dev_probe(const char *devargs)
{
struct eal_dev_mp_req req;
@@ -322,7 +322,7 @@ local_dev_remove(struct rte_device *dev)
return 0;
}
-int __rte_experimental
+int
rte_dev_remove(struct rte_device *dev)
{
struct eal_dev_mp_req req;
diff --git a/lib/librte_eal/common/eal_common_errno.c b/lib/librte_eal/common/eal_common_errno.c
index 56b492f5..c63a943b 100644
--- a/lib/librte_eal/common/eal_common_errno.c
+++ b/lib/librte_eal/common/eal_common_errno.c
@@ -2,6 +2,9 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+/* Use XSI-compliant portable version of strerror_r() */
+#undef _GNU_SOURCE
+
#include <stdint.h>
#include <stdio.h>
#include <string.h>
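The #undef _GNU_SOURCE above, placed before any header inclusion, selects the XSI-compliant strerror_r(), which returns an int status and writes the message into a caller-supplied buffer instead of returning a pointer to internal storage. A minimal sketch of the portable pattern this enables (the helper name is illustrative, not part of the patch):

#undef _GNU_SOURCE          /* must precede the includes to take effect */
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: format errnum into buf, with a fallback message. */
static const char *
fmt_errno(int errnum, char *buf, size_t buflen)
{
	/* XSI strerror_r() returns 0 on success, an error number otherwise. */
	if (strerror_r(errnum, buf, buflen) != 0)
		snprintf(buf, buflen, "Unknown error %d", errnum);
	return buf;
}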
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 12dcedf5..87fd9921 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -49,7 +49,7 @@ static uint64_t system_page_sz;
* Current known limitations are 39 or 40 bits. Setting the starting address
* at 4GB implies there are 508GB or 1020GB for mapping the available
* hugepages. This is likely enough for most systems, although a device with
- * addressing limitations should call rte_eal_check_dma_mask for ensuring all
+ * addressing limitations should call rte_mem_check_dma_mask for ensuring all
* memory is within supported range.
*/
static uint64_t baseaddr = 0x100000000;
@@ -446,11 +446,12 @@ check_iova(const struct rte_memseg_list *msl __rte_unused,
#endif
/* check memseg iovas are within the required range based on dma mask */
-int __rte_experimental
-rte_eal_check_dma_mask(uint8_t maskbits)
+static int __rte_experimental
+check_dma_mask(uint8_t maskbits, bool thread_unsafe)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
uint64_t mask;
+ int ret;
/* sanity check */
if (maskbits > MAX_DMA_MASK_BITS) {
@@ -462,7 +463,12 @@ rte_eal_check_dma_mask(uint8_t maskbits)
/* create dma mask */
mask = ~((1ULL << maskbits) - 1);
- if (rte_memseg_walk(check_iova, &mask))
+ if (thread_unsafe)
+ ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
+ else
+ ret = rte_memseg_walk(check_iova, &mask);
+
+ if (ret)
/*
* Dma mask precludes hugepage usage.
* This device can not be used and we do not need to keep
@@ -480,6 +486,34 @@ rte_eal_check_dma_mask(uint8_t maskbits)
return 0;
}
+int __rte_experimental
+rte_mem_check_dma_mask(uint8_t maskbits)
+{
+ return check_dma_mask(maskbits, false);
+}
+
+int __rte_experimental
+rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
+{
+ return check_dma_mask(maskbits, true);
+}
+
+/*
+ * Set dma mask to use when memory initialization is done.
+ *
+ * This function should ONLY be used by code executed before the memory
+ * initialization. PMDs should use rte_mem_check_dma_mask if the device
+ * has addressing limitations.
+ */
+void __rte_experimental
+rte_mem_set_dma_mask(uint8_t maskbits)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+ mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
+ RTE_MIN(mcfg->dma_maskbits, maskbits);
+}
+
/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
{
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index b82f3ddd..e31eca5c 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -222,7 +222,7 @@ eal_plugin_add(const char *path)
return -1;
}
memset(solib, 0, sizeof(*solib));
- strncpy(solib->name, path, PATH_MAX-1);
+ strlcpy(solib->name, path, PATH_MAX-1);
solib->name[PATH_MAX-1] = 0;
TAILQ_INSERT_TAIL(&solib_list, solib, next);
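The strncpy()-to-strlcpy() change above matters because strncpy() does not guarantee NUL termination when the source fills the buffer, while strlcpy() always terminates and truncates to fit. DPDK supplies a strlcpy() fallback through rte_string_fns.h when the C library lacks one. A hedged sketch (function name illustrative):

#include <limits.h>            /* PATH_MAX */
#include <rte_string_fns.h>    /* strlcpy()/rte_strlcpy() fallback */

/* Illustrative: copy a plugin path into a fixed-size name buffer. */
static void
copy_plugin_name(char dst[PATH_MAX], const char *path)
{
	/* Always NUL-terminated, truncated if path is too long. */
	strlcpy(dst, path, PATH_MAX);
}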
diff --git a/lib/librte_eal/common/hotplug_mp.c b/lib/librte_eal/common/hotplug_mp.c
index 84f59d95..7c9fcc46 100644
--- a/lib/librte_eal/common/hotplug_mp.c
+++ b/lib/librte_eal/common/hotplug_mp.c
@@ -243,7 +243,7 @@ static void __handle_primary_request(void *param)
da = calloc(1, sizeof(*da));
if (da == NULL) {
ret = -ENOMEM;
- goto quit;
+ break;
}
ret = rte_devargs_parse(da, req->devargs);
@@ -266,6 +266,8 @@ static void __handle_primary_request(void *param)
ret = local_dev_remove(dev);
quit:
+ free(da->args);
+ free(da);
break;
default:
ret = -EINVAL;
@@ -355,6 +357,7 @@ int eal_dev_hotplug_request_to_primary(struct eal_dev_mp_req *req)
resp = (struct eal_dev_mp_req *)mp_reply.msgs[0].param;
req->result = resp->result;
+ free(mp_reply.msgs);
return ret;
}
@@ -379,6 +382,7 @@ int eal_dev_hotplug_request_to_secondary(struct eal_dev_mp_req *req)
if (mp_reply.nb_sent != mp_reply.nb_received) {
RTE_LOG(ERR, EAL, "not all secondary reply\n");
+ free(mp_reply.msgs);
return -1;
}
@@ -397,6 +401,7 @@ int eal_dev_hotplug_request_to_secondary(struct eal_dev_mp_req *req)
}
}
+ free(mp_reply.msgs);
return 0;
}
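The hotplug_mp.c hunks above plug leaks: the devargs allocated in the primary-request handler and the reply buffer allocated by rte_mp_request_sync() were not released on every path. A hedged caller-side sketch of the reply-buffer pattern (function name and flow are illustrative):

#include <stdlib.h>
#include <time.h>
#include <rte_eal.h>   /* rte_mp_request_sync(), struct rte_mp_reply */

/* Illustrative: once rte_mp_request_sync() succeeds, reply.msgs belongs
 * to the caller and must be freed on every exit path. */
static int
query_peers(struct rte_mp_msg *req, const struct timespec *ts)
{
	struct rte_mp_reply reply;

	if (rte_mp_request_sync(req, &reply, ts) != 0)
		return -1;

	if (reply.nb_sent != reply.nb_received) {
		free(reply.msgs);       /* error path still owns the buffer */
		return -1;
	}

	/* ... inspect reply.msgs[0..nb_received-1] ... */

	free(reply.msgs);
	return 0;
}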
diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h
index cd6c187c..a9724dc9 100644
--- a/lib/librte_eal/common/include/rte_dev.h
+++ b/lib/librte_eal/common/include/rte_dev.h
@@ -196,9 +196,6 @@ int rte_eal_hotplug_add(const char *busname, const char *devname,
const char *drvargs);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Add matching devices.
*
* In multi-process, it will request other processes to add the same device.
@@ -209,7 +206,7 @@ int rte_eal_hotplug_add(const char *busname, const char *devname,
* @return
* 0 on success, negative on error.
*/
-int __rte_experimental rte_dev_probe(const char *devargs);
+int rte_dev_probe(const char *devargs);
/**
* Hotplug remove a given device from a specific bus.
@@ -227,9 +224,6 @@ int __rte_experimental rte_dev_probe(const char *devargs);
int rte_eal_hotplug_remove(const char *busname, const char *devname);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Remove one device.
*
* In multi-process, it will request other processes to remove the same device.
@@ -240,7 +234,7 @@ int rte_eal_hotplug_remove(const char *busname, const char *devname);
* @return
* 0 on success, negative on error.
*/
-int __rte_experimental rte_dev_remove(struct rte_device *dev);
+int rte_dev_remove(struct rte_device *dev);
/**
* Device comparison function.
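With the rte_dev.h hunks above, rte_dev_probe() and rte_dev_remove() drop their experimental markers and join the stable API (see the version map changes at the end of this diff). A hedged usage sketch (the PCI address is an example only):

#include <rte_dev.h>

/* Illustrative: attach a device from its devargs string. */
static int
attach_example(void)
{
	int ret = rte_dev_probe("0000:03:00.0");

	if (ret < 0)
		return ret;

	/* Detaching later takes the struct rte_device * looked up from its
	 * bus, e.g. via rte_dev_iterator_init()/rte_dev_iterator_next(),
	 * passed to rte_dev_remove(). */
	return 0;
}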
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index ce937058..d970825d 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -463,8 +463,45 @@ unsigned rte_memory_get_nchannel(void);
*/
unsigned rte_memory_get_nrank(void);
-/* check memsegs iovas are within a range based on dma mask */
-int __rte_experimental rte_eal_check_dma_mask(uint8_t maskbits);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Check if all currently allocated memory segments are compliant with
+ * supplied DMA address width.
+ *
+ * @param maskbits
+ * Address width to check against.
+ */
+int __rte_experimental rte_mem_check_dma_mask(uint8_t maskbits);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Check if all currently allocated memory segments are compliant with
+ * supplied DMA address width. This function uses
+ * rte_memseg_walk_thread_unsafe instead of rte_memseg_walk, so the
+ * memory_hotplug_lock is not acquired, avoiding a deadlock during
+ * memory initialization.
+ *
+ * This function is intended for internal use by EAL memory code only.
+ * Drivers should use rte_mem_check_dma_mask instead.
+ *
+ * @param maskbits
+ * Address width to check against.
+ */
+int __rte_experimental rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Set the DMA mask to be used once memory initialization is done. The
+ * functions rte_mem_check_dma_mask and rte_mem_check_dma_mask_thread_unsafe
+ * cannot be used safely until memory has been initialized.
+ */
+void __rte_experimental rte_mem_set_dma_mask(uint8_t maskbits);
/**
* Drivers based on uio will not load unless physical
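A hedged sketch of how a PMD with a limited addressing width might call the new checker at probe time; the 39-bit width and function name are illustrative, not taken from the patch:

#include <rte_log.h>
#include <rte_memory.h>

#define EXAMPLE_DMA_BITS 39    /* hypothetical device address width */

/* Illustrative: fail probe if any allocated memseg IOVA is unreachable. */
static int
example_pmd_check_dma(void)
{
	if (rte_mem_check_dma_mask(EXAMPLE_DMA_BITS) != 0) {
		RTE_LOG(ERR, PMD, "memory exceeds %d-bit DMA range\n",
			EXAMPLE_DMA_BITS);
		return -1;
	}
	return 0;
}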
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index 412ed2db..80c516d3 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -49,7 +49,7 @@ extern "C" {
* 0-15 = release candidates
* 16 = release
*/
-#define RTE_VER_RELEASE 1
+#define RTE_VER_RELEASE 2
/**
* Macro to compute a version number usable for comparisons
diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index 1973b6e6..c6a6d4f6 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -294,7 +294,6 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
size_t alloc_sz;
int allocd_pages;
void *ret, *map_addr;
- uint64_t mask;
alloc_sz = (size_t)pg_sz * n_segs;
@@ -322,14 +321,44 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
goto fail;
}
- if (mcfg->dma_maskbits) {
- mask = ~((1ULL << mcfg->dma_maskbits) - 1);
- if (rte_eal_check_dma_mask(mask)) {
+ /*
+ * Once we have all the memseg lists configured, if there is a dma mask
+ * set, check iova addresses are not out of range. Otherwise the device
+ * setting the dma mask could have problems with the mapped memory.
+ *
+ * There are two situations when this can happen:
+ * 1) memory initialization
+ * 2) dynamic memory allocation
+ *
+ * For 1), an error when checking the dma mask means the app cannot be
+ * executed. For 2), it means the new memory cannot be added.
+ */
+ if (mcfg->dma_maskbits &&
+ rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
+ /*
+ * Currently this can only happen if IOMMU is enabled
+ * and the address width supported by the IOMMU hw is
+ * not enough for using the memory mapped IOVAs.
+ *
+ * If IOVA is VA, advise trying '--iova-mode pa', which
+ * can solve some situations where IOVA VA is not really
+ * needed.
+ */
+ RTE_LOG(ERR, EAL,
+ "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask\n",
+ __func__);
+
+ /*
+ * If IOVA is VA and it is possible to run with IOVA PA
+ * because the user is root, give advice for solving the
+ * problem.
+ */
+ if ((rte_eal_iova_mode() == RTE_IOVA_VA) &&
+ rte_eal_using_phys_addrs())
RTE_LOG(ERR, EAL,
- "%s(): couldn't allocate memory due to DMA mask\n",
+ "%s(): Please try initializing EAL with --iova-mode=pa parameter\n",
__func__);
- goto fail;
- }
+ goto fail;
}
/* add newly minted memsegs to malloc heap */
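As the new log messages suggest, when this check fails under IOVA-as-VA and physical addresses are available to the process, forcing physical-address mode at EAL initialization can avoid the failure. Illustrative invocation (application name and core list are placeholders):

./dpdk-app -l 0-3 --iova-mode=pa -- <application arguments>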
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index 9e61dc41..0da5ad5e 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -349,8 +349,7 @@ rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
RTE_HEAP_NAME_MAX_LEN) {
rte_errno = EINVAL;
- ret = -1;
- goto unlock;
+ return -1;
}
rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index 8767c722..0f3695c4 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -795,6 +795,9 @@ rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
return;
}
+ if (f == NULL)
+ return;
+
fprintf(f, " %s: stats %d\tcalls %"PRIu64"\tcycles %"
PRIu64"\tavg: %"PRIu64"\n",
s->spec.name, service_stats_enabled(s), s->calls,
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 39252a88..cbac451e 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -700,7 +700,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
bool call = false;
int n, bytes_read;
struct rte_intr_source *src;
- struct rte_intr_callback *cb;
+ struct rte_intr_callback *cb, *next;
union rte_intr_read_buffer buf;
struct rte_intr_callback active_cb;
@@ -780,6 +780,23 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
"descriptor %d: %s\n",
events[n].data.fd,
strerror(errno));
+ /*
+ * The device is unplugged or buggy, remove
+ * it as an interrupt source and return to
+ * force the wait list to be rebuilt.
+ */
+ rte_spinlock_lock(&intr_lock);
+ TAILQ_REMOVE(&intr_sources, src, next);
+ rte_spinlock_unlock(&intr_lock);
+
+ for (cb = TAILQ_FIRST(&src->callbacks); cb;
+ cb = next) {
+ next = TAILQ_NEXT(cb, next);
+ TAILQ_REMOVE(&src->callbacks, cb, next);
+ free(cb);
+ }
+ free(src);
+ return -1;
} else if (bytes_read == 0)
RTE_LOG(ERR, EAL, "Read nothing from file "
"descriptor %d\n", events[n].data.fd);
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index fce86fda..c1b5e079 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1393,6 +1393,18 @@ eal_legacy_hugepage_init(void)
addr = RTE_PTR_ADD(addr, (size_t)page_sz);
}
+ if (mcfg->dma_maskbits &&
+ rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
+ RTE_LOG(ERR, EAL,
+ "%s(): couldnt allocate memory due to IOVA exceeding limits of current DMA mask.\n",
+ __func__);
+ if (rte_eal_iova_mode() == RTE_IOVA_VA &&
+ rte_eal_using_phys_addrs())
+ RTE_LOG(ERR, EAL,
+ "%s(): Please try initializing EAL with --iova-mode=pa parameter.\n",
+ __func__);
+ goto fail;
+ }
return 0;
}
@@ -1628,6 +1640,14 @@ eal_legacy_hugepage_init(void)
rte_fbarray_destroy(&msl->memseg_arr);
}
+ if (mcfg->dma_maskbits &&
+ rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
+ RTE_LOG(ERR, EAL,
+ "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
+ __func__);
+ goto fail;
+ }
+
return 0;
fail:
diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index 04f62424..3fe78260 100644
--- a/lib/librte_eal/rte_eal_version.map
+++ b/lib/librte_eal/rte_eal_version.map
@@ -259,6 +259,8 @@ DPDK_18.08 {
DPDK_18.11 {
global:
+ rte_dev_probe;
+ rte_dev_remove;
rte_eal_get_runtime_dir;
rte_eal_hotplug_add;
rte_eal_hotplug_remove;
@@ -285,8 +287,6 @@ EXPERIMENTAL {
rte_dev_is_probed;
rte_dev_iterator_init;
rte_dev_iterator_next;
- rte_dev_probe;
- rte_dev_remove;
rte_devargs_add;
rte_devargs_dump;
rte_devargs_insert;
@@ -295,7 +295,6 @@ EXPERIMENTAL {
rte_devargs_parsef;
rte_devargs_remove;
rte_devargs_type_count;
- rte_eal_check_dma_mask;
rte_eal_cleanup;
rte_fbarray_attach;
rte_fbarray_destroy;
@@ -331,9 +330,12 @@ EXPERIMENTAL {
rte_malloc_heap_socket_is_external;
rte_mem_alloc_validator_register;
rte_mem_alloc_validator_unregister;
+ rte_mem_check_dma_mask;
+ rte_mem_check_dma_mask_thread_unsafe;
rte_mem_event_callback_register;
rte_mem_event_callback_unregister;
rte_mem_iova2virt;
+ rte_mem_set_dma_mask;
rte_mem_virt2memseg;
rte_mem_virt2memseg_list;
rte_memseg_contig_walk;