From 88fab00d4402af240c1b7cc2566133aece115488 Mon Sep 17 00:00:00 2001
From: Luca Boccassi
Date: Mon, 12 Nov 2018 16:14:45 +0000
Subject: New upstream version 18.11-rc2

Change-Id: I43ca4edd0747b2dfc38c574ebf3c0aac17d7392c
Signed-off-by: Luca Boccassi
---
 lib/librte_eal/common/eal_common_memory.c | 42 ++++++++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 12dcedf5..87fd9921 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -49,7 +49,7 @@ static uint64_t system_page_sz;
  * Current known limitations are 39 or 40 bits. Setting the starting address
  * at 4GB implies there are 508GB or 1020GB for mapping the available
  * hugepages. This is likely enough for most systems, although a device with
- * addressing limitations should call rte_eal_check_dma_mask for ensuring all
+ * addressing limitations should call rte_mem_check_dma_mask for ensuring all
  * memory is within supported range.
  */
 static uint64_t baseaddr = 0x100000000;
@@ -446,11 +446,12 @@ check_iova(const struct rte_memseg_list *msl __rte_unused,
 #endif
 
 /* check memseg iovas are within the required range based on dma mask */
-int __rte_experimental
-rte_eal_check_dma_mask(uint8_t maskbits)
+static int __rte_experimental
+check_dma_mask(uint8_t maskbits, bool thread_unsafe)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	uint64_t mask;
+	int ret;
 
 	/* sanity check */
 	if (maskbits > MAX_DMA_MASK_BITS) {
@@ -462,7 +463,12 @@ rte_eal_check_dma_mask(uint8_t maskbits)
 	/* create dma mask */
 	mask = ~((1ULL << maskbits) - 1);
 
-	if (rte_memseg_walk(check_iova, &mask))
+	if (thread_unsafe)
+		ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
+	else
+		ret = rte_memseg_walk(check_iova, &mask);
+
+	if (ret)
 		/*
 		 * Dma mask precludes hugepage usage.
 		 * This device can not be used and we do not need to keep
@@ -480,6 +486,34 @@ rte_eal_check_dma_mask(uint8_t maskbits)
 	return 0;
 }
 
+int __rte_experimental
+rte_mem_check_dma_mask(uint8_t maskbits)
+{
+	return check_dma_mask(maskbits, false);
+}
+
+int __rte_experimental
+rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
+{
+	return check_dma_mask(maskbits, true);
+}
+
+/*
+ * Set dma mask to use when memory initialization is done.
+ *
+ * This function should ONLY be used by code executed before the memory
+ * initialization. PMDs should use rte_mem_check_dma_mask if addressing
+ * limitations by the device.
+ */
+void __rte_experimental
+rte_mem_set_dma_mask(uint8_t maskbits)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
+			RTE_MIN(mcfg->dma_maskbits, maskbits);
+}
+
 /* return the number of memory channels */
 unsigned rte_memory_get_nchannel(void)
 {
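
For context, a minimal sketch of how a driver with addressing limitations might use the renamed check. It assumes a hypothetical device that can only address 40 bits of IOVA space; the mydev identifiers and the 40-bit limit are illustrative and not part of this patch. Since rte_mem_check_dma_mask is still __rte_experimental in 18.11, callers must build with ALLOW_EXPERIMENTAL_API defined.

/*
 * Hypothetical probe-time check for a device limited to 40-bit IOVAs.
 * All "mydev" identifiers are illustrative.
 */
#include <stdint.h>

#include <rte_log.h>
#include <rte_memory.h>

#define MYDEV_DMA_MASK_BITS 40

static int
mydev_check_addressing(void)
{
	/*
	 * rte_mem_check_dma_mask() walks the memsegs and returns 0 when
	 * every IOVA fits under the mask, -1 when some hugepage memory
	 * is out of range (or maskbits exceeds MAX_DMA_MASK_BITS).
	 */
	if (rte_mem_check_dma_mask(MYDEV_DMA_MASK_BITS) != 0) {
		RTE_LOG(ERR, PMD,
			"hugepage memory not addressable with a 40-bit DMA mask\n");
		return -1;
	}

	return 0;
}

The rte_mem_check_dma_mask_thread_unsafe() variant added above performs the same walk via rte_memseg_walk_thread_unsafe(), i.e. without taking the memory hotplug lock, and is meant for callers that already hold that lock; ordinary probe-time code should prefer the locked version shown here.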
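
The new setter targets the opposite situation: code that runs before memory initialization and so has no memsegs to walk yet. A hedged sketch, assuming bus code that discovers an IOMMU address-width limit early in EAL startup; mybus_record_iommu_limit() is a hypothetical helper, not an API from this patch.

/*
 * Hypothetical pre-memory-init hook: record an addressing limit so the
 * EAL can validate hugepage IOVAs against it once memory is set up.
 */
#include <stdint.h>

#include <rte_memory.h>

static void
mybus_record_iommu_limit(uint8_t iommu_addr_bits)
{
	/* Only valid before hugepages are mapped; see the comment on
	 * rte_mem_set_dma_mask() in the patch above. */
	rte_mem_set_dma_mask(iommu_addr_bits);
}

Because the setter keeps the most restrictive value (the RTE_MIN() in the patch), several independent callers can each record a limit and the eventual check runs against the tightest one.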