Diffstat (limited to 'lib/librte_eal')
-rw-r--r--  lib/librte_eal/bsdapp/eal/eal.c                     |  42
-rw-r--r--  lib/librte_eal/common/eal_common_memory.c           |  12
-rw-r--r--  lib/librte_eal/common/eal_common_memzone.c          |   8
-rw-r--r--  lib/librte_eal/common/eal_common_options.c          |  51
-rw-r--r--  lib/librte_eal/common/eal_common_proc.c             |  42
-rw-r--r--  lib/librte_eal/common/eal_filesystem.h              |   9
-rw-r--r--  lib/librte_eal/common/eal_internal_cfg.h            |   6
-rw-r--r--  lib/librte_eal/common/eal_options.h                 |   1
-rw-r--r--  lib/librte_eal/common/eal_private.h                 |   6
-rw-r--r--  lib/librte_eal/common/hotplug_mp.c                  |   4
-rw-r--r--  lib/librte_eal/common/include/generic/rte_atomic.h  |   6
-rw-r--r--  lib/librte_eal/common/include/rte_malloc.h          |  13
-rw-r--r--  lib/librte_eal/common/include/rte_version.h         |   2
-rw-r--r--  lib/librte_eal/common/malloc_elem.c                 |   4
-rw-r--r--  lib/librte_eal/common/malloc_mp.c                   |   8
-rw-r--r--  lib/librte_eal/common/rte_malloc.c                  |  34
-rw-r--r--  lib/librte_eal/common/rte_option.c                  |   5
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal.c                   | 150
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_memalloc.c          |  50
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_memory.c            |   2
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_vfio.c              |  91
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_vfio.h              |  12
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c      |  16
23 files changed, 469 insertions(+), 105 deletions(-)
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index b8152a75..f01495e3 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -115,7 +115,7 @@ eal_create_runtime_dir(void)
/* create prefix-specific subdirectory under DPDK runtime dir */
ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
- tmp, internal_config.hugefile_prefix);
+ tmp, eal_get_hugefile_prefix());
if (ret < 0 || ret == sizeof(runtime_dir)) {
RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
return -1;
@@ -141,6 +141,16 @@ eal_create_runtime_dir(void)
return 0;
}
+int
+eal_clean_runtime_dir(void)
+{
+ /* FreeBSD doesn't need this implemented for now, because, unlike Linux,
+ * FreeBSD doesn't create per-process files, so no need to clean up.
+ */
+ return 0;
+}
+
+
const char *
rte_eal_get_runtime_dir(void)
{
@@ -447,9 +457,21 @@ eal_parse_args(int argc, char **argv)
switch (opt) {
case OPT_MBUF_POOL_OPS_NAME_NUM:
- internal_config.user_mbuf_pool_ops_name =
- strdup(optarg);
+ {
+ char *ops_name = strdup(optarg);
+ if (ops_name == NULL)
+ RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
+ else {
+ /* free old ops name */
+ if (internal_config.user_mbuf_pool_ops_name !=
+ NULL)
+ free(internal_config.user_mbuf_pool_ops_name);
+
+ internal_config.user_mbuf_pool_ops_name =
+ ops_name;
+ }
break;
+ }
case 'h':
eal_usage(prgname);
exit(EXIT_SUCCESS);
@@ -807,6 +829,18 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ /*
+ * Clean up unused files in runtime directory. We do this at the end of
+ * init and not at the beginning because we want to clean stuff up
+ * whether we are primary or secondary process, but we cannot remove
+ * primary process' files because secondary should be able to run even
+ * if primary process is dead.
+ */
+ if (eal_clean_runtime_dir() < 0) {
+ rte_eal_init_alert("Cannot clear runtime directory\n");
+ return -1;
+ }
+
rte_eal_mcfg_complete();
/* Call each registered callback, if enabled */
@@ -819,6 +853,8 @@ int __rte_experimental
rte_eal_cleanup(void)
{
rte_service_finalize();
+ rte_mp_channel_cleanup();
+ eal_cleanup_config(&internal_config);
return 0;
}
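
Example (not part of the patch): with the two calls added above, rte_eal_cleanup() now also tears down the IPC channel and frees the duplicated option strings, so an application's shutdown path only needs the public API. A minimal sketch (rte_eal_cleanup() is still experimental at this point, so the build must allow experimental API):

#include <rte_eal.h>

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* ... application work ... */

	/* closes the mp socket and releases internal_config strings */
	return rte_eal_cleanup();
}
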
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index d47ea493..999ba24b 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -704,6 +704,12 @@ rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
return -1;
}
+ /* segment fd API is not supported for external segments */
+ if (msl->external) {
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+
ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
if (ret < 0) {
rte_errno = -ret;
@@ -754,6 +760,12 @@ rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
return -1;
}
+ /* segment fd API is not supported for external segments */
+ if (msl->external) {
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+
ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
if (ret < 0) {
rte_errno = -ret;
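
Example (not part of the patch): with the checks above, the segment fd API reports ENOTSUP for externally allocated segments. A hedged sketch of how a caller can distinguish that case; rte_memseg_get_fd() and rte_errno are existing APIs, the helper name is illustrative:

#include <errno.h>
#include <rte_memory.h>
#include <rte_errno.h>

static int
try_get_segment_fd(const struct rte_memseg *ms)
{
	int fd = rte_memseg_get_fd(ms);

	if (fd < 0) {
		if (rte_errno == ENOTSUP)
			return -1; /* external segment: no file behind it */
		return -1;         /* other errors: ENODEV, ENOENT, ... */
	}
	return fd;
}
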
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index b7081afb..664df5b9 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -365,6 +365,7 @@ int
rte_eal_memzone_init(void)
{
struct rte_mem_config *mcfg;
+ int ret = 0;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
@@ -375,17 +376,16 @@ rte_eal_memzone_init(void)
rte_fbarray_init(&mcfg->memzones, "memzone",
RTE_MAX_MEMZONE, sizeof(struct rte_memzone))) {
RTE_LOG(ERR, EAL, "Cannot allocate memzone list\n");
- return -1;
+ ret = -1;
} else if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
rte_fbarray_attach(&mcfg->memzones)) {
RTE_LOG(ERR, EAL, "Cannot attach to memzone list\n");
- rte_rwlock_write_unlock(&mcfg->mlock);
- return -1;
+ ret = -1;
}
rte_rwlock_write_unlock(&mcfg->mlock);
- return 0;
+ return ret;
}
/* Walk all reserved memory zones */
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index e31eca5c..f6dfbc73 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -168,6 +168,14 @@ eal_option_device_parse(void)
return ret;
}
+const char *
+eal_get_hugefile_prefix(void)
+{
+ if (internal_config.hugefile_prefix != NULL)
+ return internal_config.hugefile_prefix;
+ return HUGEFILE_PREFIX_DEFAULT;
+}
+
void
eal_reset_internal_config(struct internal_config *internal_cfg)
{
@@ -176,7 +184,7 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
internal_cfg->memory = 0;
internal_cfg->force_nrank = 0;
internal_cfg->force_nchannel = 0;
- internal_cfg->hugefile_prefix = HUGEFILE_PREFIX_DEFAULT;
+ internal_cfg->hugefile_prefix = NULL;
internal_cfg->hugepage_dir = NULL;
internal_cfg->force_sockets = 0;
/* zero out the NUMA config */
@@ -591,7 +599,9 @@ eal_parse_corelist(const char *corelist)
if (*corelist == '\0')
return -1;
errno = 0;
- idx = strtoul(corelist, &end, 10);
+ idx = strtol(corelist, &end, 10);
+ if (idx < 0 || idx >= (int)cfg->lcore_count)
+ return -1;
if (errno || end == NULL)
return -1;
while (isblank(*end))
@@ -1102,6 +1112,7 @@ eal_parse_common_option(int opt, const char *optarg,
{
static int b_used;
static int w_used;
+ struct rte_config *cfg = rte_eal_get_configuration();
switch (opt) {
/* blacklist */
@@ -1144,7 +1155,9 @@ eal_parse_common_option(int opt, const char *optarg,
/* corelist */
case 'l':
if (eal_parse_corelist(optarg) < 0) {
- RTE_LOG(ERR, EAL, "invalid core list\n");
+ RTE_LOG(ERR, EAL,
+ "invalid core list, please check core numbers are in [0, %u] range\n",
+ cfg->lcore_count-1);
return -1;
}
@@ -1347,6 +1360,19 @@ eal_auto_detect_cores(struct rte_config *cfg)
}
int
+eal_cleanup_config(struct internal_config *internal_cfg)
+{
+ if (internal_cfg->hugefile_prefix != NULL)
+ free(internal_cfg->hugefile_prefix);
+ if (internal_cfg->hugepage_dir != NULL)
+ free(internal_cfg->hugepage_dir);
+ if (internal_cfg->user_mbuf_pool_ops_name != NULL)
+ free(internal_cfg->user_mbuf_pool_ops_name);
+
+ return 0;
+}
+
+int
eal_adjust_config(struct internal_config *internal_cfg)
{
int i;
@@ -1361,6 +1387,8 @@ eal_adjust_config(struct internal_config *internal_cfg)
/* default master lcore is the first one */
if (!master_lcore_parsed) {
cfg->master_lcore = rte_get_next_lcore(-1, 0, 0);
+ if (cfg->master_lcore >= RTE_MAX_LCORE)
+ return -1;
lcore_config[cfg->master_lcore].core_role = ROLE_RTE;
}
@@ -1386,7 +1414,22 @@ eal_check_common_options(struct internal_config *internal_cfg)
RTE_LOG(ERR, EAL, "Invalid process type specified\n");
return -1;
}
- if (index(internal_cfg->hugefile_prefix, '%') != NULL) {
+ if (internal_cfg->hugefile_prefix != NULL &&
+ strlen(internal_cfg->hugefile_prefix) < 1) {
+ RTE_LOG(ERR, EAL, "Invalid length of --" OPT_FILE_PREFIX " option\n");
+ return -1;
+ }
+ if (internal_cfg->hugepage_dir != NULL &&
+ strlen(internal_cfg->hugepage_dir) < 1) {
+ RTE_LOG(ERR, EAL, "Invalid length of --" OPT_HUGE_DIR" option\n");
+ return -1;
+ }
+ if (internal_cfg->user_mbuf_pool_ops_name != NULL &&
+ strlen(internal_cfg->user_mbuf_pool_ops_name) < 1) {
+ RTE_LOG(ERR, EAL, "Invalid length of --" OPT_MBUF_POOL_OPS_NAME" option\n");
+ return -1;
+ }
+ if (index(eal_get_hugefile_prefix(), '%') != NULL) {
RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" "
"option\n");
return -1;
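
Example (not part of the patch): --file-prefix, --huge-dir and --mbuf-pool-ops-name now all follow the same replace-on-reparse pattern: duplicate the new value first, and only free the previous one if the duplication succeeded, so a failed strdup() leaves the old setting intact. A hedged sketch of that pattern as a generic helper (the helper name is hypothetical):

#include <stdlib.h>
#include <string.h>

/* replace *dst with a copy of src; keep the old value if strdup() fails */
static int
replace_option_string(char **dst, const char *src)
{
	char *copy = strdup(src);

	if (copy == NULL)
		return -1;	/* caller logs the error, old value stays */
	free(*dst);		/* free(NULL) is a no-op */
	*dst = copy;
	return 0;
}
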
diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c
index 1c3f09aa..b46d644b 100644
--- a/lib/librte_eal/common/eal_common_proc.c
+++ b/lib/librte_eal/common/eal_common_proc.c
@@ -37,6 +37,7 @@ static int mp_fd = -1;
static char mp_filter[PATH_MAX]; /* Filter for secondary process sockets */
static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;
+static char peer_name[PATH_MAX];
struct action_entry {
TAILQ_ENTRY(action_entry) next;
@@ -511,9 +512,9 @@ async_reply_handle(void *arg)
static int
open_socket_fd(void)
{
- char peer_name[PATH_MAX] = {0};
struct sockaddr_un un;
+ peer_name[0] = '\0';
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
snprintf(peer_name, sizeof(peer_name),
"%d_%"PRIx64, getpid(), rte_rdtsc());
@@ -542,27 +543,17 @@ open_socket_fd(void)
return mp_fd;
}
-static int
-unlink_sockets(const char *filter)
+static void
+close_socket_fd(void)
{
- int dir_fd;
- DIR *mp_dir;
- struct dirent *ent;
-
- mp_dir = opendir(mp_dir_path);
- if (!mp_dir) {
- RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
- return -1;
- }
- dir_fd = dirfd(mp_dir);
+ char path[PATH_MAX];
- while ((ent = readdir(mp_dir))) {
- if (fnmatch(filter, ent->d_name, 0) == 0)
- unlinkat(dir_fd, ent->d_name, 0);
- }
+ if (mp_fd < 0)
+ return;
- closedir(mp_dir);
- return 0;
+ close(mp_fd);
+ create_socket_path(peer_name, path, sizeof(path));
+ unlink(path);
}
int
@@ -603,13 +594,6 @@ rte_mp_channel_init(void)
return -1;
}
- if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
- unlink_sockets(mp_filter)) {
- RTE_LOG(ERR, EAL, "failed to unlink mp sockets\n");
- close(dir_fd);
- return -1;
- }
-
if (open_socket_fd() < 0) {
close(dir_fd);
return -1;
@@ -632,6 +616,12 @@ rte_mp_channel_init(void)
return 0;
}
+void
+rte_mp_channel_cleanup(void)
+{
+ close_socket_fd();
+}
+
/**
* Return -1, as fail to send message and it's caused by the local side.
* Return 0, as fail to send message and it's caused by the remote side.
diff --git a/lib/librte_eal/common/eal_filesystem.h b/lib/librte_eal/common/eal_filesystem.h
index 6e0331fd..89a3adde 100644
--- a/lib/librte_eal/common/eal_filesystem.h
+++ b/lib/librte_eal/common/eal_filesystem.h
@@ -25,6 +25,13 @@
int
eal_create_runtime_dir(void);
+int
+eal_clean_runtime_dir(void);
+
+/** Function to return hugefile prefix that's currently set up */
+const char *
+eal_get_hugefile_prefix(void);
+
#define RUNTIME_CONFIG_FNAME "config"
static inline const char *
eal_runtime_config_path(void)
@@ -86,7 +93,7 @@ static inline const char *
eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id)
{
snprintf(buffer, buflen, HUGEFILE_FMT, hugedir,
- internal_config.hugefile_prefix, f_id);
+ eal_get_hugefile_prefix(), f_id);
buffer[buflen - 1] = '\0';
return buffer;
}
diff --git a/lib/librte_eal/common/eal_internal_cfg.h b/lib/librte_eal/common/eal_internal_cfg.h
index 737f17e3..783ce7de 100644
--- a/lib/librte_eal/common/eal_internal_cfg.h
+++ b/lib/librte_eal/common/eal_internal_cfg.h
@@ -64,9 +64,9 @@ struct internal_config {
volatile int syslog_facility; /**< facility passed to openlog() */
/** default interrupt mode for VFIO */
volatile enum rte_intr_mode vfio_intr_mode;
- const char *hugefile_prefix; /**< the base filename of hugetlbfs files */
- const char *hugepage_dir; /**< specific hugetlbfs directory to use */
- const char *user_mbuf_pool_ops_name;
+ char *hugefile_prefix; /**< the base filename of hugetlbfs files */
+ char *hugepage_dir; /**< specific hugetlbfs directory to use */
+ char *user_mbuf_pool_ops_name;
/**< user defined mbuf pool ops name */
unsigned num_hugepage_sizes; /**< how many sizes on this system */
struct hugepage_info hugepage_info[MAX_HUGEPAGE_SIZES];
diff --git a/lib/librte_eal/common/eal_options.h b/lib/librte_eal/common/eal_options.h
index 5271f944..327c95e9 100644
--- a/lib/librte_eal/common/eal_options.h
+++ b/lib/librte_eal/common/eal_options.h
@@ -75,6 +75,7 @@ int eal_parse_common_option(int opt, const char *argv,
struct internal_config *conf);
int eal_option_device_parse(void);
int eal_adjust_config(struct internal_config *internal_cfg);
+int eal_cleanup_config(struct internal_config *internal_cfg);
int eal_check_common_options(struct internal_config *internal_cfg);
void eal_common_usage(void);
enum rte_proc_type_t eal_proc_type_detect(void);
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 442c6dc4..4f483833 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -255,10 +255,14 @@ struct rte_bus *rte_bus_find_by_device_name(const char *str);
* 0 on success;
* (<0) on failure.
*/
-
int rte_mp_channel_init(void);
/**
+ * Primary/secondary communication cleanup.
+ */
+void rte_mp_channel_cleanup(void);
+
+/**
* @internal
* Parse a device string and store its information in an
* rte_devargs structure.
diff --git a/lib/librte_eal/common/hotplug_mp.c b/lib/librte_eal/common/hotplug_mp.c
index 070e2e0c..9d610a8a 100644
--- a/lib/librte_eal/common/hotplug_mp.c
+++ b/lib/librte_eal/common/hotplug_mp.c
@@ -208,6 +208,8 @@ handle_secondary_request(const struct rte_mp_msg *msg, const void *peer)
ret = rte_eal_alarm_set(1, __handle_secondary_request, bundle);
if (ret != 0) {
RTE_LOG(ERR, EAL, "failed to add mp task\n");
+ free(bundle->peer);
+ free(bundle);
return send_response_to_secondary(req, ret, peer);
}
return 0;
@@ -332,6 +334,8 @@ handle_primary_request(const struct rte_mp_msg *msg, const void *peer)
*/
ret = rte_eal_alarm_set(1, __handle_primary_request, bundle);
if (ret != 0) {
+ free(bundle->peer);
+ free(bundle);
resp->result = ret;
ret = rte_mp_reply(&mp_resp, peer);
if (ret != 0) {
diff --git a/lib/librte_eal/common/include/generic/rte_atomic.h b/lib/librte_eal/common/include/generic/rte_atomic.h
index b99ba468..4afd1acc 100644
--- a/lib/librte_eal/common/include/generic/rte_atomic.h
+++ b/lib/librte_eal/common/include/generic/rte_atomic.h
@@ -212,7 +212,7 @@ rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
-#if defined(RTE_ARCH_ARM64) && defined(RTE_TOOLCHAIN_CLANG)
+#if defined(__clang__)
return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
#else
return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
@@ -495,7 +495,7 @@ rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val);
static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
-#if defined(RTE_ARCH_ARM64) && defined(RTE_TOOLCHAIN_CLANG)
+#if defined(__clang__)
return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
#else
return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
@@ -777,7 +777,7 @@ rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val);
static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
-#if defined(RTE_ARCH_ARM64) && defined(RTE_TOOLCHAIN_CLANG)
+#if defined(__clang__)
return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
#else
return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
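
Example (not part of the patch): clang does not expose the size-suffixed __atomic_exchange_2/_4/_8 builtins the way GCC does, which is why the exchange helpers now use the generic form whenever __clang__ is defined. A minimal sketch of the generic builtin, mirroring the 32-bit case:

#include <stdint.h>

static inline uint32_t
exchange_u32(volatile uint32_t *dst, uint32_t val)
{
	/* atomically store val into *dst and return the previous value;
	 * accepted by both GCC and clang */
	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
}
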
diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h
index 7249e6aa..54a12467 100644
--- a/lib/librte_eal/common/include/rte_malloc.h
+++ b/lib/librte_eal/common/include/rte_malloc.h
@@ -251,6 +251,9 @@ rte_malloc_validate(const void *ptr, size_t *size);
/**
* Get heap statistics for the specified heap.
*
+ * @note This function is not thread-safe with respect to
+ * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions.
+ *
* @param socket
* An unsigned integer specifying the socket to get heap statistics for
* @param socket_stats
@@ -282,9 +285,9 @@ rte_malloc_get_socket_stats(int socket,
* @param heap_name
* Name of the heap to add memory chunk to
* @param va_addr
- * Start of virtual area to add to the heap
+ * Start of virtual area to add to the heap. Must be aligned by ``page_sz``.
* @param len
- * Length of virtual area to add to the heap
+ * Length of virtual area to add to the heap. Must be aligned by ``page_sz``.
* @param iova_addrs
* Array of page IOVA addresses corresponding to each page in this memory
* area. Can be NULL, in which case page IOVA addresses will be set to
@@ -461,6 +464,9 @@ rte_malloc_heap_socket_is_external(int socket_id);
* Dump for the specified type to a file. If the type argument is
* NULL, all memory types will be dumped.
*
+ * @note This function is not thread-safe with respect to
+ * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions.
+ *
* @param f
* A pointer to a file for output
* @param type
@@ -473,6 +479,9 @@ rte_malloc_dump_stats(FILE *f, const char *type);
/**
* Dump contents of all malloc heaps to a file.
*
+ * @note This function is not thread-safe with respect to
+ * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions.
+ *
* @param f
* A pointer to a file for output
*/
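
Example (not part of the patch): the updated documentation makes explicit that both va_addr and len passed to rte_malloc_heap_memory_add() must be multiples of page_sz (the validation added in rte_malloc.c below enforces it). A hedged usage sketch under those constraints; the heap name and area size are illustrative:

#include <unistd.h>
#include <sys/mman.h>
#include <rte_malloc.h>

#define EXT_HEAP_NAME "ext_heap"	/* illustrative name */

static int
add_external_area(void)
{
	size_t page_sz = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 16 * page_sz;		/* multiple of page_sz */
	void *va;

	if (rte_malloc_heap_create(EXT_HEAP_NAME) < 0)
		return -1;
	/* mmap() returns page-aligned memory, satisfying the va_addr rule */
	va = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (va == MAP_FAILED)
		return -1;
	/* iova_addrs == NULL: page IOVA addresses are set to RTE_BAD_IOVA */
	if (rte_malloc_heap_memory_add(EXT_HEAP_NAME, va, len,
			NULL, 0, page_sz) < 0) {
		munmap(va, len);
		return -1;
	}
	return 0;
}
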
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index f01c227f..b4c6dd3c 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -37,7 +37,7 @@ extern "C" {
/**
* Patch level number i.e. the z in yy.mm.z
*/
-#define RTE_VER_MINOR 0
+#define RTE_VER_MINOR 1
/**
* Extra string to be appended to version number
diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
index 9d3dcb6a..052aeeb7 100644
--- a/lib/librte_eal/common/malloc_elem.c
+++ b/lib/librte_eal/common/malloc_elem.c
@@ -38,6 +38,10 @@ malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
/* segment must start after header and with specified alignment */
contig_seg_start = RTE_PTR_ALIGN_CEIL(data_start, align);
+ /* return if aligned address is already out of malloc element */
+ if (contig_seg_start > data_end)
+ return 0;
+
/* if we're in IOVA as VA mode, or if we're in legacy mode with
* hugepages, all elements are IOVA-contiguous. however, we can only
* make these assumptions about internal memory - externally allocated
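
Example (not part of the patch): the added check covers the case where aligning the data start pushes the candidate address past the end of the element, which can happen when the requested alignment is larger than the element itself. A small numeric illustration using the existing RTE_PTR_ALIGN_CEIL and RTE_PTR_DIFF macros; the addresses are made up:

#include <stdio.h>
#include <rte_common.h>

static void
align_overflow_example(void)
{
	/* a 64-byte element starting at 0x1040, asked for 4K alignment */
	void *data_start = (void *)0x1040;
	void *data_end = (void *)0x1080;
	void *seg = RTE_PTR_ALIGN_CEIL(data_start, 4096);	/* -> 0x2000 */

	/* 0x2000 is past data_end, so there is no usable space at this
	 * alignment; without the check a length would be computed from an
	 * out-of-range address */
	size_t avail = seg > data_end ? 0 : RTE_PTR_DIFF(data_end, seg);

	printf("available contiguous space: %zu\n", avail);
}
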
diff --git a/lib/librte_eal/common/malloc_mp.c b/lib/librte_eal/common/malloc_mp.c
index 5f2d4e0b..f3a13353 100644
--- a/lib/librte_eal/common/malloc_mp.c
+++ b/lib/librte_eal/common/malloc_mp.c
@@ -209,6 +209,8 @@ handle_alloc_request(const struct malloc_mp_req *m,
map_addr = ms[0]->addr;
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);
+
/* we have succeeded in allocating memory, but we still need to sync
* with other processes. however, since DPDK IPC is single-threaded, we
* send an asynchronous request and exit this callback.
@@ -258,6 +260,9 @@ handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused)
if (m->t == REQ_TYPE_ALLOC) {
ret = handle_alloc_request(m, entry);
} else if (m->t == REQ_TYPE_FREE) {
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+ m->free_req.addr, m->free_req.len);
+
ret = malloc_heap_free_pages(m->free_req.addr,
m->free_req.len);
} else {
@@ -436,6 +441,9 @@ handle_sync_response(const struct rte_mp_msg *request,
memset(&rb_msg, 0, sizeof(rb_msg));
/* we've failed to sync, so do a rollback */
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+ state->map_addr, state->map_len);
+
rollback_expand_heap(state->ms, state->ms_len, state->elem,
state->map_addr, state->map_len);
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index 0da5ad5e..47c2bec7 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -156,20 +156,14 @@ rte_malloc_get_socket_stats(int socket,
struct rte_malloc_socket_stats *socket_stats)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- int heap_idx, ret = -1;
-
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ int heap_idx;
heap_idx = malloc_socket_to_heap_id(socket);
if (heap_idx < 0)
- goto unlock;
+ return -1;
- ret = malloc_heap_get_stats(&mcfg->malloc_heaps[heap_idx],
+ return malloc_heap_get_stats(&mcfg->malloc_heaps[heap_idx],
socket_stats);
-unlock:
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
-
- return ret;
}
/*
@@ -181,14 +175,10 @@ rte_malloc_dump_heaps(FILE *f)
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
unsigned int idx;
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
-
for (idx = 0; idx < RTE_MAX_HEAPS; idx++) {
fprintf(f, "Heap id: %u\n", idx);
malloc_heap_dump(&mcfg->malloc_heaps[idx], f);
}
-
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
}
int
@@ -262,8 +252,6 @@ rte_malloc_dump_stats(FILE *f, __rte_unused const char *type)
unsigned int heap_id;
struct rte_malloc_socket_stats sock_stats;
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
-
/* Iterate through all initialised heaps */
for (heap_id = 0; heap_id < RTE_MAX_HEAPS; heap_id++) {
struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
@@ -280,7 +268,6 @@ rte_malloc_dump_stats(FILE *f, __rte_unused const char *type)
fprintf(f, "\tAlloc_count:%u,\n",sock_stats.alloc_count);
fprintf(f, "\tFree_count:%u,\n", sock_stats.free_count);
}
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
return;
}
@@ -345,6 +332,9 @@ rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
if (heap_name == NULL || va_addr == NULL ||
page_sz == 0 || !rte_is_power_of_2(page_sz) ||
+ RTE_ALIGN(len, page_sz) != len ||
+ !rte_is_aligned(va_addr, page_sz) ||
+ ((len / page_sz) != n_pages && iova_addrs != NULL) ||
strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
RTE_HEAP_NAME_MAX_LEN) {
@@ -367,11 +357,6 @@ rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
goto unlock;
}
n = len / page_sz;
- if (n != n_pages && iova_addrs != NULL) {
- rte_errno = EINVAL;
- ret = -1;
- goto unlock;
- }
rte_spinlock_lock(&heap->lock);
ret = malloc_heap_add_external_memory(heap, va_addr, iova_addrs, n,
@@ -517,13 +502,8 @@ sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
if (wa.result < 0) {
rte_errno = -wa.result;
ret = -1;
- } else {
- /* notify all subscribers that a new memory area was added */
- if (attach)
- eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
- va_addr, len);
+ } else
ret = 0;
- }
unlock:
rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
return ret;
diff --git a/lib/librte_eal/common/rte_option.c b/lib/librte_eal/common/rte_option.c
index 02d59a86..198de6d2 100644
--- a/lib/librte_eal/common/rte_option.c
+++ b/lib/librte_eal/common/rte_option.c
@@ -35,10 +35,11 @@ void __rte_experimental
rte_option_register(struct rte_option *opt)
{
TAILQ_FOREACH(option, &rte_option_list, next) {
- if (strcmp(opt->opt_str, option->opt_str) == 0)
- RTE_LOG(INFO, EAL, "Option %s has already been registered.",
+ if (strcmp(opt->opt_str, option->opt_str) == 0) {
+ RTE_LOG(ERR, EAL, "Option %s has already been registered.\n",
opt->opt_str);
return;
+ }
}
TAILQ_INSERT_HEAD(&rte_option_list, opt, next);
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 361744d4..30138b63 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -13,7 +13,9 @@
#include <syslog.h>
#include <getopt.h>
#include <sys/file.h>
+#include <dirent.h>
#include <fcntl.h>
+#include <fnmatch.h>
#include <stddef.h>
#include <errno.h>
#include <limits.h>
@@ -123,7 +125,7 @@ eal_create_runtime_dir(void)
/* create prefix-specific subdirectory under DPDK runtime dir */
ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
- tmp, internal_config.hugefile_prefix);
+ tmp, eal_get_hugefile_prefix());
if (ret < 0 || ret == sizeof(runtime_dir)) {
RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
return -1;
@@ -149,6 +151,91 @@ eal_create_runtime_dir(void)
return 0;
}
+int
+eal_clean_runtime_dir(void)
+{
+ DIR *dir;
+ struct dirent *dirent;
+ int dir_fd, fd, lck_result;
+ static const char * const filters[] = {
+ "fbarray_*",
+ "mp_socket_*"
+ };
+
+ /* open directory */
+ dir = opendir(runtime_dir);
+ if (!dir) {
+ RTE_LOG(ERR, EAL, "Unable to open runtime directory %s\n",
+ runtime_dir);
+ goto error;
+ }
+ dir_fd = dirfd(dir);
+
+ /* lock the directory before doing anything, to avoid races */
+ if (flock(dir_fd, LOCK_EX) < 0) {
+ RTE_LOG(ERR, EAL, "Unable to lock runtime directory %s\n",
+ runtime_dir);
+ goto error;
+ }
+
+ dirent = readdir(dir);
+ if (!dirent) {
+ RTE_LOG(ERR, EAL, "Unable to read runtime directory %s\n",
+ runtime_dir);
+ goto error;
+ }
+
+ while (dirent != NULL) {
+ unsigned int f_idx;
+ bool skip = true;
+
+ /* skip files that don't match the patterns */
+ for (f_idx = 0; f_idx < RTE_DIM(filters); f_idx++) {
+ const char *filter = filters[f_idx];
+
+ if (fnmatch(filter, dirent->d_name, 0) == 0) {
+ skip = false;
+ break;
+ }
+ }
+ if (skip) {
+ dirent = readdir(dir);
+ continue;
+ }
+
+ /* try and lock the file */
+ fd = openat(dir_fd, dirent->d_name, O_RDONLY);
+
+ /* skip to next file */
+ if (fd == -1) {
+ dirent = readdir(dir);
+ continue;
+ }
+
+ /* non-blocking lock */
+ lck_result = flock(fd, LOCK_EX | LOCK_NB);
+
+ /* if lock succeeds, remove the file */
+ if (lck_result != -1)
+ unlinkat(dir_fd, dirent->d_name, 0);
+ close(fd);
+ dirent = readdir(dir);
+ }
+
+ /* closedir closes dir_fd and drops the lock */
+ closedir(dir);
+ return 0;
+
+error:
+ if (dir)
+ closedir(dir);
+
+ RTE_LOG(ERR, EAL, "Error while clearing runtime dir: %s\n",
+ strerror(errno));
+
+ return -1;
+}
+
const char *
rte_eal_get_runtime_dir(void)
{
@@ -494,10 +581,6 @@ eal_parse_socket_arg(char *strval, volatile uint64_t *socket_arg)
socket_arg[i] = val;
}
- /* check if we have a positive amount of total memory */
- if (total_mem == 0)
- return -1;
-
return 0;
}
@@ -639,13 +722,31 @@ eal_parse_args(int argc, char **argv)
exit(EXIT_SUCCESS);
case OPT_HUGE_DIR_NUM:
- internal_config.hugepage_dir = strdup(optarg);
+ {
+ char *hdir = strdup(optarg);
+ if (hdir == NULL)
+ RTE_LOG(ERR, EAL, "Could not store hugepage directory\n");
+ else {
+ /* free old hugepage dir */
+ if (internal_config.hugepage_dir != NULL)
+ free(internal_config.hugepage_dir);
+ internal_config.hugepage_dir = hdir;
+ }
break;
-
+ }
case OPT_FILE_PREFIX_NUM:
- internal_config.hugefile_prefix = strdup(optarg);
+ {
+ char *prefix = strdup(optarg);
+ if (prefix == NULL)
+ RTE_LOG(ERR, EAL, "Could not store file prefix\n");
+ else {
+ /* free old prefix */
+ if (internal_config.hugefile_prefix != NULL)
+ free(internal_config.hugefile_prefix);
+ internal_config.hugefile_prefix = prefix;
+ }
break;
-
+ }
case OPT_SOCKET_MEM_NUM:
if (eal_parse_socket_arg(optarg,
internal_config.socket_mem) < 0) {
@@ -695,10 +796,21 @@ eal_parse_args(int argc, char **argv)
break;
case OPT_MBUF_POOL_OPS_NAME_NUM:
- internal_config.user_mbuf_pool_ops_name =
- strdup(optarg);
+ {
+ char *ops_name = strdup(optarg);
+ if (ops_name == NULL)
+ RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
+ else {
+ /* free old ops name */
+ if (internal_config.user_mbuf_pool_ops_name !=
+ NULL)
+ free(internal_config.user_mbuf_pool_ops_name);
+
+ internal_config.user_mbuf_pool_ops_name =
+ ops_name;
+ }
break;
-
+ }
default:
if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
RTE_LOG(ERR, EAL, "Option %c is not supported "
@@ -1096,6 +1208,18 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ /*
+ * Clean up unused files in runtime directory. We do this at the end of
+ * init and not at the beginning because we want to clean stuff up
+ * whether we are primary or secondary process, but we cannot remove
+ * primary process' files because secondary should be able to run even
+ * if primary process is dead.
+ */
+ if (eal_clean_runtime_dir() < 0) {
+ rte_eal_init_alert("Cannot clear runtime directory\n");
+ return -1;
+ }
+
rte_eal_mcfg_complete();
/* Call each registered callback, if enabled */
@@ -1130,6 +1254,8 @@ rte_eal_cleanup(void)
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_memseg_walk(mark_freeable, NULL);
rte_service_finalize();
+ rte_mp_channel_cleanup();
+ eal_cleanup_config(&internal_config);
return 0;
}
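
Example (not part of the patch): eal_clean_runtime_dir() decides whether a file is stale by probing it with a non-blocking exclusive lock: files still in use by a process are held under flock() (fbarray files, for example), so a successful lock means nothing is using the file and it is safe to unlink. A stripped-down sketch of that probe as a hypothetical helper:

#include <fcntl.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/file.h>

/* return true if no process holds a lock on dir_fd/name */
static bool
file_is_stale(int dir_fd, const char *name)
{
	bool stale = false;
	int fd = openat(dir_fd, name, O_RDONLY);

	if (fd < 0)
		return false;	/* cannot open it, leave it alone */
	if (flock(fd, LOCK_EX | LOCK_NB) == 0)
		stale = true;	/* nobody else holds a lock */
	close(fd);		/* closing also drops the lock we just took */
	return stale;
}
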
diff --git a/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
index 78493956..f63d9ca6 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memalloc.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
@@ -23,6 +23,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
+#include <linux/memfd.h>
+#define MEMFD_SUPPORTED
+#endif
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
@@ -53,8 +57,8 @@ const int anonymous_hugepages_supported =
#endif
/*
- * we don't actually care if memfd itself is supported - we only need to check
- * if memfd supports hugetlbfs, as that already implies memfd support.
+ * we've already checked memfd support at compile-time, but we also need to
+ * check if we can create hugepage files with memfd.
*
* also, this is not a constant, because while we may be *compiled* with memfd
* hugetlbfs support, we might not be *running* on a system that supports memfd
@@ -63,10 +67,11 @@ const int anonymous_hugepages_supported =
*/
static int memfd_create_supported =
#ifdef MFD_HUGETLB
-#define MEMFD_SUPPORTED
1;
+#define RTE_MFD_HUGETLB MFD_HUGETLB
#else
0;
+#define RTE_MFD_HUGETLB 4U
#endif
/*
@@ -171,7 +176,7 @@ prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
RTE_LOG(ERR, EAL,
"Failed to get current mempolicy: %s. "
"Assuming MPOL_DEFAULT.\n", strerror(errno));
- oldpolicy = MPOL_DEFAULT;
+ *oldpolicy = MPOL_DEFAULT;
}
RTE_LOG(DEBUG, EAL,
"Setting policy MPOL_PREFERRED for socket %d\n",
@@ -338,12 +343,12 @@ get_seg_memfd(struct hugepage_info *hi __rte_unused,
int fd;
char segname[250]; /* as per manpage, limit is 249 bytes plus null */
+ int flags = RTE_MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
+
if (internal_config.single_file_segments) {
fd = fd_list[list_idx].memseg_list_fd;
if (fd < 0) {
- int flags = MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
-
snprintf(segname, sizeof(segname), "seg_%i", list_idx);
fd = memfd_create(segname, flags);
if (fd < 0) {
@@ -357,8 +362,6 @@ get_seg_memfd(struct hugepage_info *hi __rte_unused,
fd = fd_list[list_idx].fds[seg_idx];
if (fd < 0) {
- int flags = MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
-
snprintf(segname, sizeof(segname), "seg_%i-%i",
list_idx, seg_idx);
fd = memfd_create(segname, flags);
@@ -633,13 +636,13 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
int mmap_flags;
if (internal_config.in_memory && !memfd_create_supported) {
- int pagesz_flag, flags;
+ const int in_memory_flags = MAP_HUGETLB | MAP_FIXED |
+ MAP_PRIVATE | MAP_ANONYMOUS;
+ int pagesz_flag;
pagesz_flag = pagesz_flags(alloc_sz);
- flags = pagesz_flag | MAP_HUGETLB | MAP_FIXED |
- MAP_PRIVATE | MAP_ANONYMOUS;
fd = -1;
- mmap_flags = flags;
+ mmap_flags = in_memory_flags | pagesz_flag;
/* single-file segments codepath will never be active
* here because in-memory mode is incompatible with the
@@ -1542,6 +1545,17 @@ int
eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
{
int fd;
+
+ if (internal_config.in_memory || internal_config.no_hugetlbfs) {
+#ifndef MEMFD_SUPPORTED
+ /* in in-memory or no-huge mode, we rely on memfd support */
+ return -ENOTSUP;
+#endif
+ /* memfd supported, but hugetlbfs memfd may not be */
+ if (!internal_config.no_hugetlbfs && !memfd_create_supported)
+ return -ENOTSUP;
+ }
+
if (internal_config.single_file_segments) {
fd = fd_list[list_idx].memseg_list_fd;
} else if (fd_list[list_idx].len == 0) {
@@ -1565,7 +1579,7 @@ test_memfd_create(void)
int pagesz_flag = pagesz_flags(pagesz);
int flags;
- flags = pagesz_flag | MFD_HUGETLB;
+ flags = pagesz_flag | RTE_MFD_HUGETLB;
int fd = memfd_create("test", flags);
if (fd < 0) {
/* we failed - let memalloc know this isn't working */
@@ -1589,6 +1603,16 @@ eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ if (internal_config.in_memory || internal_config.no_hugetlbfs) {
+#ifndef MEMFD_SUPPORTED
+ /* in in-memory or no-huge mode, we rely on memfd support */
+ return -ENOTSUP;
+#endif
+ /* memfd supported, but hugetlbfs memfd may not be */
+ if (!internal_config.no_hugetlbfs && !memfd_create_supported)
+ return -ENOTSUP;
+ }
+
/* fd_list not initialized? */
if (fd_list[list_idx].len == 0)
return -ENODEV;
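
Example (not part of the patch): the RTE_MFD_HUGETLB define lets the file build against kernel headers that predate MFD_HUGETLB while still passing the correct flag value (4U) at run time; whether the running kernel actually accepts it is then probed by test_memfd_create(). A standalone sketch of the same compile-time fallback, assuming a toolchain that defines SYS_memfd_create:

#define _GNU_SOURCE
#include <fcntl.h>		/* F_ADD_SEALS */
#include <unistd.h>
#include <sys/syscall.h>

#ifdef F_ADD_SEALS		/* file sealing implies the memfd syscall exists */
#include <linux/memfd.h>
#endif

#ifdef MFD_HUGETLB
#define LOCAL_MFD_HUGETLB MFD_HUGETLB	/* value from the system header */
#else
#define LOCAL_MFD_HUGETLB 4U		/* same value, for older headers */
#endif

/* probe whether the running kernel accepts hugetlb-backed memfds */
static int
probe_hugetlb_memfd(void)
{
	int fd = syscall(SYS_memfd_create, "probe", LOCAL_MFD_HUGETLB);

	if (fd < 0)
		return -1;	/* kernel too old, or flag rejected */
	close(fd);
	return 0;
}
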
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 32feb415..e05da74c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -434,7 +434,7 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
}
snprintf(hugedir_str, sizeof(hugedir_str),
- "%s/%s", hpi->hugedir, internal_config.hugefile_prefix);
+ "%s/%s", hpi->hugedir, eal_get_hugefile_prefix());
/* parse numa map */
while (fgets(buf, sizeof(buf), f) != NULL) {
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 0516b159..c821e838 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -549,6 +549,65 @@ next:
}
}
+static int
+vfio_sync_default_container(void)
+{
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply;
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
+ int iommu_type_id;
+ unsigned int i;
+
+ /* cannot be called from primary */
+ if (rte_eal_process_type() != RTE_PROC_SECONDARY)
+ return -1;
+
+ /* default container fd should have been opened in rte_vfio_enable() */
+ if (!default_vfio_cfg->vfio_enabled ||
+ default_vfio_cfg->vfio_container_fd < 0) {
+ RTE_LOG(ERR, EAL, "VFIO support is not initialized\n");
+ return -1;
+ }
+
+ /* find default container's IOMMU type */
+ p->req = SOCKET_REQ_IOMMU_TYPE;
+ strcpy(mp_req.name, EAL_VFIO_MP);
+ mp_req.len_param = sizeof(*p);
+ mp_req.num_fds = 0;
+
+ iommu_type_id = -1;
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ p = (struct vfio_mp_param *)mp_rep->param;
+ if (p->result == SOCKET_OK)
+ iommu_type_id = p->iommu_type_id;
+ free(mp_reply.msgs);
+ }
+ if (iommu_type_id < 0) {
+ RTE_LOG(ERR, EAL, "Could not get IOMMU type for default container\n");
+ return -1;
+ }
+
+ /* we now have an fd for default container, as well as its IOMMU type.
+ * now, set up default VFIO container config to match.
+ */
+ for (i = 0; i < RTE_DIM(iommu_types); i++) {
+ const struct vfio_iommu_type *t = &iommu_types[i];
+ if (t->type_id != iommu_type_id)
+ continue;
+
+ /* we found our IOMMU type */
+ default_vfio_cfg->vfio_iommu_type = t;
+
+ return 0;
+ }
+ RTE_LOG(ERR, EAL, "Could not find IOMMU type id (%i)\n",
+ iommu_type_id);
+ return -1;
+}
+
int
rte_vfio_clear_group(int vfio_group_fd)
{
@@ -745,6 +804,26 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
else
RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
}
+ } else if (rte_eal_process_type() != RTE_PROC_PRIMARY &&
+ vfio_cfg == default_vfio_cfg &&
+ vfio_cfg->vfio_iommu_type == NULL) {
+ /* if we're not a primary process, we do not set up the VFIO
+ * container because it's already been set up by the primary
+ * process. instead, we simply ask the primary about VFIO type
+ * we are using, and set the VFIO config up appropriately.
+ */
+ ret = vfio_sync_default_container();
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Could not sync default VFIO container\n");
+ close(vfio_group_fd);
+ rte_vfio_clear_group(vfio_group_fd);
+ return -1;
+ }
+ /* we have successfully initialized VFIO, notify user */
+ const struct vfio_iommu_type *t =
+ default_vfio_cfg->vfio_iommu_type;
+ RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n",
+ t->type_id, t->name);
}
/* get a file descriptor for the device */
@@ -857,7 +936,8 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
/* if there are no active device groups, unregister the callback to
* avoid spurious attempts to map/unmap memory from VFIO.
*/
- if (vfio_cfg == default_vfio_cfg && vfio_cfg->vfio_active_groups == 0)
+ if (vfio_cfg == default_vfio_cfg && vfio_cfg->vfio_active_groups == 0 &&
+ rte_eal_process_type() != RTE_PROC_SECONDARY)
rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME,
NULL);
@@ -977,6 +1057,15 @@ vfio_get_default_container_fd(void)
return -1;
}
+int
+vfio_get_iommu_type(void)
+{
+ if (default_vfio_cfg->vfio_iommu_type == NULL)
+ return -1;
+
+ return default_vfio_cfg->vfio_iommu_type->type_id;
+}
+
const struct vfio_iommu_type *
vfio_set_iommu_type(int vfio_container_fd)
{
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.h b/lib/librte_eal/linuxapp/eal/eal_vfio.h
index 63ae115c..cb2d35fb 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.h
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.h
@@ -5,6 +5,8 @@
#ifndef EAL_VFIO_H_
#define EAL_VFIO_H_
+#include <rte_common.h>
+
/*
* determine if VFIO is present on the system
*/
@@ -122,6 +124,9 @@ int vfio_get_default_container_fd(void);
const struct vfio_iommu_type *
vfio_set_iommu_type(int vfio_container_fd);
+int
+vfio_get_iommu_type(void);
+
/* check if we have any supported extensions */
int
vfio_has_supported_extensions(int vfio_container_fd);
@@ -133,6 +138,7 @@ int vfio_mp_sync_setup(void);
#define SOCKET_REQ_CONTAINER 0x100
#define SOCKET_REQ_GROUP 0x200
#define SOCKET_REQ_DEFAULT_CONTAINER 0x400
+#define SOCKET_REQ_IOMMU_TYPE 0x800
#define SOCKET_OK 0x0
#define SOCKET_NO_FD 0x1
#define SOCKET_ERR 0xFF
@@ -140,7 +146,11 @@ int vfio_mp_sync_setup(void);
struct vfio_mp_param {
int req;
int result;
- int group_num;
+ RTE_STD_C11
+ union {
+ int group_num;
+ int iommu_type_id;
+ };
};
#endif /* VFIO_PRESENT */
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
index a1e8c834..2a47f29d 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
@@ -77,6 +77,22 @@ vfio_mp_primary(const struct rte_mp_msg *msg, const void *peer)
reply.fds[0] = fd;
}
break;
+ case SOCKET_REQ_IOMMU_TYPE:
+ {
+ int iommu_type_id;
+
+ r->req = SOCKET_REQ_IOMMU_TYPE;
+
+ iommu_type_id = vfio_get_iommu_type();
+
+ if (iommu_type_id < 0)
+ r->result = SOCKET_ERR;
+ else {
+ r->iommu_type_id = iommu_type_id;
+ r->result = SOCKET_OK;
+ }
+ break;
+ }
default:
RTE_LOG(ERR, EAL, "vfio received invalid message!\n");
return -1;