From 39157ec04095ab012d11db23c462844634bfbb8f Mon Sep 17 00:00:00 2001 From: Luca Boccassi Date: Mon, 23 Apr 2018 14:16:57 +0100 Subject: New upstream version 16.11.5 Change-Id: I47171042629a57c6958d50251351e668ca5f3d8b Signed-off-by: Luca Boccassi --- lib/librte_eal/bsdapp/contigmem/contigmem.c | 1 + lib/librte_eal/bsdapp/eal/eal_memory.c | 2 +- lib/librte_eal/common/eal_common_memzone.c | 3 +- lib/librte_eal/common/eal_common_pci_uio.c | 1 - .../common/include/arch/ppc_64/rte_atomic.h | 8 +- .../common/include/arch/x86/rte_atomic.h | 44 ++++++++++- lib/librte_eal/common/include/rte_debug.h | 2 +- lib/librte_eal/common/include/rte_version.h | 2 +- lib/librte_eal/common/malloc_elem.c | 1 + lib/librte_eal/common/malloc_heap.c | 6 +- lib/librte_eal/common/malloc_heap.h | 2 +- lib/librte_eal/common/rte_keepalive.c | 28 ++++--- lib/librte_eal/linuxapp/eal/eal_pci.c | 1 - lib/librte_eal/linuxapp/eal/eal_vfio.c | 92 +++++++++++++++++++++- lib/librte_eal/linuxapp/eal/eal_vfio.h | 25 ++++++ lib/librte_eal/linuxapp/igb_uio/compat.h | 4 + lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 22 ++++++ lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c | 41 ++++++++++ lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h | 4 + 19 files changed, 264 insertions(+), 25 deletions(-) (limited to 'lib/librte_eal') diff --git a/lib/librte_eal/bsdapp/contigmem/contigmem.c b/lib/librte_eal/bsdapp/contigmem/contigmem.c index e8fb9087..9676d8b3 100644 --- a/lib/librte_eal/bsdapp/contigmem/contigmem.c +++ b/lib/librte_eal/bsdapp/contigmem/contigmem.c @@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include diff --git a/lib/librte_eal/bsdapp/eal/eal_memory.c b/lib/librte_eal/bsdapp/eal/eal_memory.c index 3614da8d..248312d9 100644 --- a/lib/librte_eal/bsdapp/eal/eal_memory.c +++ b/lib/librte_eal/bsdapp/eal/eal_memory.c @@ -150,7 +150,7 @@ rte_eal_hugepage_attach(void) /* Map the shared hugepage_info into the process address spaces */ hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE, fd_hugepage_info, 0); - if (hpi == NULL) { + if (hpi == MAP_FAILED) { RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path()); goto error; } diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c index 64f4e0ad..b58d85b7 100644 --- a/lib/librte_eal/common/eal_common_memzone.c +++ b/lib/librte_eal/common/eal_common_memzone.c @@ -236,7 +236,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len, return NULL; } - const struct malloc_elem *elem = malloc_elem_from_data(mz_addr); + struct malloc_elem *elem = malloc_elem_from_data(mz_addr); /* fill the zone in config */ mz = get_next_free_memzone(); @@ -244,6 +244,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len, if (mz == NULL) { RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room " "in config!\n", __func__); + malloc_elem_free(elem); rte_errno = ENOSPC; return NULL; } diff --git a/lib/librte_eal/common/eal_common_pci_uio.c b/lib/librte_eal/common/eal_common_pci_uio.c index 367a6816..6f91ff9c 100644 --- a/lib/librte_eal/common/eal_common_pci_uio.c +++ b/lib/librte_eal/common/eal_common_pci_uio.c @@ -117,7 +117,6 @@ pci_uio_map_resource(struct rte_pci_device *dev) dev->intr_handle.fd = -1; dev->intr_handle.uio_cfg_fd = -1; - dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; /* secondary processes - use already recorded details */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h 
b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
index fb4fccb4..37f5eff2 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
@@ -64,9 +64,9 @@ extern "C" {
  * occur before the STORE operations generated after.
  */
 #ifdef RTE_ARCH_64
-#define rte_wmb() {asm volatile("lwsync" : : : "memory"); }
+#define rte_wmb() asm volatile("lwsync" : : : "memory")
 #else
-#define rte_wmb() {asm volatile("sync" : : : "memory"); }
+#define rte_wmb() asm volatile("sync" : : : "memory")
 #endif
 
 /**
@@ -76,9 +76,9 @@ extern "C" {
  * occur before the LOAD operations generated after.
  */
 #ifdef RTE_ARCH_64
-#define rte_rmb() {asm volatile("lwsync" : : : "memory"); }
+#define rte_rmb() asm volatile("lwsync" : : : "memory")
 #else
-#define rte_rmb() {asm volatile("sync" : : : "memory"); }
+#define rte_rmb() asm volatile("sync" : : : "memory")
 #endif
 
 #define rte_smp_mb() rte_mb()
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic.h b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
index 00b1cdf5..d12b679a 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
@@ -55,12 +55,52 @@ extern "C" {
 
 #define rte_rmb() _mm_lfence()
 
-#define rte_smp_mb() rte_mb()
-
 #define rte_smp_wmb() rte_compiler_barrier()
 
 #define rte_smp_rmb() rte_compiler_barrier()
 
+/*
+ * From the Intel Software Developer's Manual; Vol 3;
+ * 8.2.2 Memory Ordering in P6 and More Recent Processor Families:
+ * ...
+ * . Reads are not reordered with other reads.
+ * . Writes are not reordered with older reads.
+ * . Writes to memory are not reordered with other writes,
+ *   with the following exceptions:
+ *   . streaming stores (writes) executed with the non-temporal move
+ *     instructions (MOVNTI, MOVNTQ, MOVNTDQ, MOVNTPS, and MOVNTPD); and
+ *   . string operations (see Section 8.2.4.1).
+ * ...
+ * . Reads may be reordered with older writes to different locations but not
+ *   with older writes to the same location.
+ * . Reads or writes cannot be reordered with I/O instructions,
+ *   locked instructions, or serializing instructions.
+ * . Reads cannot pass earlier LFENCE and MFENCE instructions.
+ * . Writes ... cannot pass earlier LFENCE, SFENCE, and MFENCE instructions.
+ * . LFENCE instructions cannot pass earlier reads.
+ * . SFENCE instructions cannot pass earlier writes ...
+ * . MFENCE instructions cannot pass earlier reads, writes ...
+ *
+ * As pointed out by the Java folks, that makes it possible to use
+ * lock-prefixed instructions to get the same effect as mfence, and on most
+ * modern HW that gives better performance than using mfence:
+ * https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
+ * The basic idea is to use a lock-prefixed add with some dummy memory
+ * location as the destination. From their experiments, 128B (2 cache lines)
+ * below the current stack pointer looks like a good candidate.
+ * So below we use that technique for the rte_smp_mb() implementation.
+ */ + +static inline void __attribute__((always_inline)) +rte_smp_mb(void) +{ +#ifdef RTE_ARCH_I686 + asm volatile("lock addl $0, -128(%%esp); " ::: "memory"); +#else + asm volatile("lock addl $0, -128(%%rsp); " ::: "memory"); +#endif +} + /*------------------------- 16 bit atomic operations -------------------------*/ #ifndef RTE_FORCE_INTRINSICS diff --git a/lib/librte_eal/common/include/rte_debug.h b/lib/librte_eal/common/include/rte_debug.h index cab6fb4c..ec1dce03 100644 --- a/lib/librte_eal/common/include/rte_debug.h +++ b/lib/librte_eal/common/include/rte_debug.h @@ -86,7 +86,7 @@ void rte_dump_registers(void); #endif #define RTE_VERIFY(exp) do { \ if (unlikely(!(exp))) \ - rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \ + rte_panic("line %d\tassert \"%s\" failed\n", __LINE__, #exp); \ } while (0) /* diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h index e92737d2..4a9f4821 100644 --- a/lib/librte_eal/common/include/rte_version.h +++ b/lib/librte_eal/common/include/rte_version.h @@ -66,7 +66,7 @@ extern "C" { /** * Patch level number i.e. the z in yy.mm.z */ -#define RTE_VER_MINOR 4 +#define RTE_VER_MINOR 5 /** * Extra string to be appended to version number diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c index 77a86151..e2bd3ac2 100644 --- a/lib/librte_eal/common/malloc_elem.c +++ b/lib/librte_eal/common/malloc_elem.c @@ -98,6 +98,7 @@ elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align, if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) { end_pt = RTE_ALIGN_FLOOR(end_pt, bound); new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align); + end_pt = new_data_start + size; if (((end_pt - 1) & bmask) != (new_data_start & bmask)) return NULL; } diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c index 267a4c6c..c731f1cd 100644 --- a/lib/librte_eal/common/malloc_heap.c +++ b/lib/librte_eal/common/malloc_heap.c @@ -178,12 +178,14 @@ malloc_heap_alloc(struct malloc_heap *heap, * Function to retrieve data for heap on given socket */ int -malloc_heap_get_stats(const struct malloc_heap *heap, +malloc_heap_get_stats(struct malloc_heap *heap, struct rte_malloc_socket_stats *socket_stats) { size_t idx; struct malloc_elem *elem; + rte_spinlock_lock(&heap->lock); + /* Initialise variables for heap */ socket_stats->free_count = 0; socket_stats->heap_freesz_bytes = 0; @@ -205,6 +207,8 @@ malloc_heap_get_stats(const struct malloc_heap *heap, socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes - socket_stats->heap_freesz_bytes); socket_stats->alloc_count = heap->alloc_count; + + rte_spinlock_unlock(&heap->lock); return 0; } diff --git a/lib/librte_eal/common/malloc_heap.h b/lib/librte_eal/common/malloc_heap.h index 3ccbef0f..3b1166f0 100644 --- a/lib/librte_eal/common/malloc_heap.h +++ b/lib/librte_eal/common/malloc_heap.h @@ -57,7 +57,7 @@ malloc_heap_alloc(struct malloc_heap *heap, const char *type, size_t size, unsigned flags, size_t align, size_t bound); int -malloc_heap_get_stats(const struct malloc_heap *heap, +malloc_heap_get_stats(struct malloc_heap *heap, struct rte_malloc_socket_stats *socket_stats); int diff --git a/lib/librte_eal/common/rte_keepalive.c b/lib/librte_eal/common/rte_keepalive.c index 9765d1bd..4625fab0 100644 --- a/lib/librte_eal/common/rte_keepalive.c +++ b/lib/librte_eal/common/rte_keepalive.c @@ -42,8 +42,12 @@ struct rte_keepalive { /** Core Liveness. 
*/ - enum rte_keepalive_state __rte_cache_aligned state_flags[ - RTE_KEEPALIVE_MAXCORES]; + struct { + /* + * Each element must be cache aligned to prevent false sharing. + */ + enum rte_keepalive_state core_state __rte_cache_aligned; + } live_data[RTE_KEEPALIVE_MAXCORES]; /** Last-seen-alive timestamps */ uint64_t last_alive[RTE_KEEPALIVE_MAXCORES]; @@ -96,19 +100,22 @@ rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer, if (keepcfg->active_cores[idx_core] == 0) continue; - switch (keepcfg->state_flags[idx_core]) { + switch (keepcfg->live_data[idx_core].core_state) { case RTE_KA_STATE_UNUSED: break; case RTE_KA_STATE_ALIVE: /* Alive */ - keepcfg->state_flags[idx_core] = RTE_KA_STATE_MISSING; + keepcfg->live_data[idx_core].core_state = + RTE_KA_STATE_MISSING; keepcfg->last_alive[idx_core] = rte_rdtsc(); break; case RTE_KA_STATE_MISSING: /* MIA */ print_trace("Core MIA. ", keepcfg, idx_core); - keepcfg->state_flags[idx_core] = RTE_KA_STATE_DEAD; + keepcfg->live_data[idx_core].core_state = + RTE_KA_STATE_DEAD; break; case RTE_KA_STATE_DEAD: /* Dead */ - keepcfg->state_flags[idx_core] = RTE_KA_STATE_GONE; + keepcfg->live_data[idx_core].core_state = + RTE_KA_STATE_GONE; print_trace("Core died. ", keepcfg, idx_core); if (keepcfg->callback) keepcfg->callback( @@ -119,7 +126,8 @@ rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer, case RTE_KA_STATE_GONE: /* Buried */ break; case RTE_KA_STATE_DOZING: /* Core going idle */ - keepcfg->state_flags[idx_core] = RTE_KA_STATE_SLEEP; + keepcfg->live_data[idx_core].core_state = + RTE_KA_STATE_SLEEP; keepcfg->last_alive[idx_core] = rte_rdtsc(); break; case RTE_KA_STATE_SLEEP: /* Idled core */ @@ -129,7 +137,7 @@ rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer, keepcfg->relay_callback( keepcfg->relay_callback_data, idx_core, - keepcfg->state_flags[idx_core], + keepcfg->live_data[idx_core].core_state, keepcfg->last_alive[idx_core] ); } @@ -173,11 +181,11 @@ rte_keepalive_register_core(struct rte_keepalive *keepcfg, const int id_core) void rte_keepalive_mark_alive(struct rte_keepalive *keepcfg) { - keepcfg->state_flags[rte_lcore_id()] = RTE_KA_STATE_ALIVE; + keepcfg->live_data[rte_lcore_id()].core_state = RTE_KA_STATE_ALIVE; } void rte_keepalive_mark_sleep(struct rte_keepalive *keepcfg) { - keepcfg->state_flags[rte_lcore_id()] = RTE_KA_STATE_DOZING; + keepcfg->live_data[rte_lcore_id()].core_state = RTE_KA_STATE_DOZING; } diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c index 876ba381..b0d0c3c6 100644 --- a/lib/librte_eal/linuxapp/eal/eal_pci.c +++ b/lib/librte_eal/linuxapp/eal/eal_pci.c @@ -623,7 +623,6 @@ pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused, if (!found) return -1; - dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; p->base = start; RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start); diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c index 702f7a2e..dd451071 100644 --- a/lib/librte_eal/linuxapp/eal/eal_vfio.c +++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c @@ -50,12 +50,15 @@ static struct vfio_config vfio_cfg; static int vfio_type1_dma_map(int); +static int vfio_spapr_dma_map(int); static int vfio_noiommu_dma_map(int); /* IOMMU types we support */ static const struct vfio_iommu_type iommu_types[] = { /* x86 IOMMU, otherwise known as type 1 */ { RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map}, + /* ppc64 IOMMU, otherwise known as spapr */ + { RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map}, /* IOMMU-less mode */ { 
RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map}, }; @@ -339,7 +342,7 @@ vfio_enable(const char *modname) int vfio_is_enabled(const char *modname) { - const int mod_available = rte_eal_check_module(modname); + const int mod_available = rte_eal_check_module(modname) > 0; return vfio_cfg.vfio_enabled && mod_available; } @@ -539,6 +542,93 @@ vfio_type1_dma_map(int vfio_container_fd) return 0; } +static int +vfio_spapr_dma_map(int vfio_container_fd) +{ + const struct rte_memseg *ms = rte_eal_get_physmem_layout(); + int i, ret; + + struct vfio_iommu_spapr_register_memory reg = { + .argsz = sizeof(reg), + .flags = 0 + }; + struct vfio_iommu_spapr_tce_info info = { + .argsz = sizeof(info), + }; + struct vfio_iommu_spapr_tce_create create = { + .argsz = sizeof(create), + }; + struct vfio_iommu_spapr_tce_remove remove = { + .argsz = sizeof(remove), + }; + + /* query spapr iommu info */ + ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); + if (ret) { + RTE_LOG(ERR, EAL, " cannot get iommu info, " + "error %i (%s)\n", errno, strerror(errno)); + return -1; + } + + /* remove default DMA of 32 bit window */ + remove.start_addr = info.dma32_window_start; + ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove); + if (ret) { + RTE_LOG(ERR, EAL, " cannot remove default DMA window, " + "error %i (%s)\n", errno, strerror(errno)); + return -1; + } + + /* calculate window size based on number of hugepages configured */ + create.window_size = rte_eal_get_physmem_size(); + create.page_shift = __builtin_ctzll(ms->hugepage_sz); + create.levels = 2; + + ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); + if (ret) { + RTE_LOG(ERR, EAL, " cannot create new DMA window, " + "error %i (%s)\n", errno, strerror(errno)); + return -1; + } + + /* map all DPDK segments for DMA. 
use 1:1 PA to IOVA mapping */ + for (i = 0; i < RTE_MAX_MEMSEG; i++) { + struct vfio_iommu_type1_dma_map dma_map; + + if (ms[i].addr == NULL) + break; + + reg.vaddr = (uintptr_t) ms[i].addr; + reg.size = ms[i].len; + ret = ioctl(vfio_container_fd, + VFIO_IOMMU_SPAPR_REGISTER_MEMORY, ®); + if (ret) { + RTE_LOG(ERR, EAL, " cannot register vaddr for IOMMU, " + "error %i (%s)\n", errno, strerror(errno)); + return -1; + } + + memset(&dma_map, 0, sizeof(dma_map)); + dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map); + dma_map.vaddr = ms[i].addr_64; + dma_map.size = ms[i].len; + dma_map.iova = ms[i].phys_addr; + dma_map.flags = VFIO_DMA_MAP_FLAG_READ | + VFIO_DMA_MAP_FLAG_WRITE; + + ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map); + + if (ret) { + RTE_LOG(ERR, EAL, " cannot set up DMA remapping, " + "error %i (%s)\n", errno, strerror(errno)); + return -1; + } + + } + + return 0; +} + static int vfio_noiommu_dma_map(int __rte_unused vfio_container_fd) { diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.h b/lib/librte_eal/linuxapp/eal/eal_vfio.h index 29f7f3ec..ac31a4fc 100644 --- a/lib/librte_eal/linuxapp/eal/eal_vfio.h +++ b/lib/librte_eal/linuxapp/eal/eal_vfio.h @@ -54,6 +54,31 @@ #define RTE_VFIO_TYPE1 VFIO_TYPE1_IOMMU +#ifndef VFIO_SPAPR_TCE_v2_IOMMU +#define RTE_VFIO_SPAPR 7 +#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17) +#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19) +#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20) +struct vfio_iommu_spapr_register_memory { + uint32_t argsz; + uint32_t flags; + uint64_t vaddr; + uint64_t size; +}; +struct vfio_iommu_spapr_tce_create { + uint32_t argsz; + uint32_t page_shift; + uint64_t window_size; + uint32_t levels; +}; +struct vfio_iommu_spapr_tce_remove { + uint32_t argsz; + uint64_t start_addr; +}; +#else +#define RTE_VFIO_SPAPR VFIO_SPAPR_TCE_v2_IOMMU +#endif + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) #define RTE_VFIO_NOIOMMU 8 #else diff --git a/lib/librte_eal/linuxapp/igb_uio/compat.h b/lib/librte_eal/linuxapp/igb_uio/compat.h index 0d781e48..38259330 100644 --- a/lib/librte_eal/linuxapp/igb_uio/compat.h +++ b/lib/librte_eal/linuxapp/igb_uio/compat.h @@ -123,3 +123,7 @@ static bool pci_check_and_mask_intx(struct pci_dev *pdev) } #endif /* < 3.3.0 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) +#define HAVE_ALLOC_IRQ_VECTORS 1 +#endif diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c index df41e457..9f00f07a 100644 --- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c +++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c @@ -325,7 +325,9 @@ static int igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct rte_uio_pci_dev *udev; +#ifndef HAVE_ALLOC_IRQ_VECTORS struct msix_entry msix_entry; +#endif int err; udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL); @@ -379,6 +381,7 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) switch (igbuio_intr_mode_preferred) { case RTE_INTR_MODE_MSIX: /* Only 1 msi-x vector needed */ +#ifndef HAVE_ALLOC_IRQ_VECTORS msix_entry.entry = 0; if (pci_enable_msix(dev, &msix_entry, 1) == 0) { dev_dbg(&dev->dev, "using MSI-X"); @@ -386,6 +389,15 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) udev->mode = RTE_INTR_MODE_MSIX; break; } +#else + if (pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSIX) == 1) { + dev_dbg(&dev->dev, "using MSI-X"); + udev->info.irq_flags = IRQF_NO_THREAD; + udev->info.irq = pci_irq_vector(dev, 
0); + udev->mode = RTE_INTR_MODE_MSIX; + break; + } +#endif /* fall back to INTX */ case RTE_INTR_MODE_LEGACY: if (pci_intx_mask_supported(dev)) { @@ -429,8 +441,13 @@ fail_remove_group: sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); fail_release_iomem: igbuio_pci_release_iomem(&udev->info); +#ifndef HAVE_ALLOC_IRQ_VECTORS if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(udev->pdev); +#else + if (udev->mode == RTE_INTR_MODE_MSIX) + pci_free_irq_vectors(udev->pdev); +#endif pci_disable_device(dev); fail_free: kfree(udev); @@ -446,8 +463,13 @@ igbuio_pci_remove(struct pci_dev *dev) sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); uio_unregister_device(&udev->info); igbuio_pci_release_iomem(&udev->info); +#ifndef HAVE_ALLOC_IRQ_VECTORS if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(dev); +#else + if (udev->mode == RTE_INTR_MODE_MSIX) + pci_free_irq_vectors(dev); +#endif pci_disable_device(dev); pci_set_drvdata(dev, NULL); kfree(udev); diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c index acb1a69b..3c683e17 100644 --- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c +++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c @@ -137,11 +137,20 @@ static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); static void igb_clean_tx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); +#ifdef HAVE_TIMER_SETUP +static void igb_update_phy_info(struct timer_list *); +static void igb_watchdog(struct timer_list *); +#else static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); +#endif static void igb_watchdog_task(struct work_struct *); static void igb_dma_err_task(struct work_struct *); +#ifdef HAVE_TIMER_SETUP +static void igb_dma_err_timer(struct timer_list *); +#else static void igb_dma_err_timer(unsigned long data); +#endif static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); static struct net_device_stats *igb_get_stats(struct net_device *); static int igb_change_mtu(struct net_device *, int); @@ -2806,6 +2815,12 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* Check if Media Autosense is enabled */ if (hw->mac.type == e1000_82580) igb_init_mas(adapter); +#ifdef HAVE_TIMER_SETUP + timer_setup(&adapter->watchdog_timer, &igb_watchdog, 0); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + timer_setup(&adapter->dma_err_timer, &igb_dma_err_timer, 0); + timer_setup(&adapter->phy_info_timer, &igb_update_phy_info, 0); +#else setup_timer(&adapter->watchdog_timer, &igb_watchdog, (unsigned long) adapter); if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) @@ -2813,6 +2828,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, (unsigned long) adapter); setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, (unsigned long) adapter); +#endif INIT_WORK(&adapter->reset_task, igb_reset_task); INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); @@ -4543,9 +4559,15 @@ static void igb_spoof_check(struct igb_adapter *adapter) /* Need to wait a few seconds after link up to get diagnostic information from * the phy */ +#ifdef HAVE_TIMER_SETUP +static void igb_update_phy_info(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); +#else static void igb_update_phy_info(unsigned long data) { struct igb_adapter *adapter = (struct igb_adapter *) data; +#endif e1000_get_phy_info(&adapter->hw); } @@ -4594,9 +4616,15 @@ bool igb_has_link(struct 
igb_adapter *adapter) * igb_watchdog - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ +#ifdef HAVE_TIMER_SETUP +static void igb_watchdog(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); +#else static void igb_watchdog(unsigned long data) { struct igb_adapter *adapter = (struct igb_adapter *)data; +#endif /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); } @@ -4854,9 +4882,15 @@ dma_timer_reset: * igb_dma_err_timer - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ +#ifdef HAVE_TIMER_SETUP +static void igb_dma_err_timer(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, dma_err_timer); +#else static void igb_dma_err_timer(unsigned long data) { struct igb_adapter *adapter = (struct igb_adapter *)data; +#endif /* Do the rest outside of interrupt context */ schedule_work(&adapter->dma_err_task); } @@ -10051,6 +10085,12 @@ int igb_kni_probe(struct pci_dev *pdev, igb_init_mas(adapter); #ifdef NO_KNI +#ifdef HAVE_TIMER_SETUP + timer_setup(&adapter->watchdog_timer, &igb_watchdog, 0); + if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) + timer_setup(&adapter->dma_err_timer, &igb_dma_err_timer, 0); + timer_setup(&adapter->phy_info_timer, &igb_update_phy_info, 0); +#else setup_timer(&adapter->watchdog_timer, &igb_watchdog, (unsigned long) adapter); if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) @@ -10058,6 +10098,7 @@ int igb_kni_probe(struct pci_dev *pdev, (unsigned long) adapter); setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, (unsigned long) adapter); +#endif INIT_WORK(&adapter->reset_task, igb_reset_task); INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h index aea253b1..88bd18ec 100644 --- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h +++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h @@ -3937,4 +3937,8 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type) #define HAVE_PCI_ENABLE_MSIX #endif +#if defined(timer_setup) && defined(from_timer) +#define HAVE_TIMER_SETUP +#endif + #endif /* _KCOMPAT_H_ */ -- cgit 1.2.3-korg
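
Editor's addendum (illustrative, not part of the patch): the x86 rte_atomic.h hunk above replaces the mfence-based rte_smp_mb() with a lock-prefixed add on a dummy stack slot. Below is a minimal standalone sketch of that technique, assuming a 64-bit x86 target with the System V red zone and GCC/Clang extended inline asm; the names full_barrier, data and flag are made up for the example.

#include <stdio.h>

static inline void full_barrier(void)
{
	/*
	 * A LOCK-prefixed read-modify-write acts as a full memory fence on x86.
	 * -128(%rsp) stays inside the 128-byte red zone, so no live stack data
	 * is touched; the "memory" clobber also stops compiler reordering.
	 */
	asm volatile("lock addl $0, -128(%%rsp)" ::: "memory");
}

int main(void)
{
	int data = 0;
	int flag = 0;

	data = 42;      /* payload store */
	full_barrier(); /* payload is globally visible before the flag store */
	flag = 1;

	printf("data=%d flag=%d\n", data, flag);
	return 0;
}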
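
A second illustrative sketch (again a hedged stand-in, not DPDK code): the rte_keepalive.c hunk wraps each core's state in a cache-aligned struct so neighbouring cores no longer share a cache line (false sharing). The same idea in plain C, with made-up names (CACHE_LINE_SIZE, MAX_CORES, per_core_state, mark_alive):

#include <stdio.h>

#define CACHE_LINE_SIZE 64
#define MAX_CORES       128

enum core_state { CORE_UNUSED, CORE_ALIVE, CORE_MISSING, CORE_DEAD };

/*
 * One state word per core, padded out to a full cache line so a write from
 * one core never invalidates the line holding another core's state.
 */
struct per_core_state {
	enum core_state state;
} __attribute__((aligned(CACHE_LINE_SIZE)));

static struct per_core_state live_data[MAX_CORES];

static void mark_alive(unsigned int core_id)
{
	live_data[core_id].state = CORE_ALIVE; /* touches only this core's line */
}

int main(void)
{
	mark_alive(3);
	/* sizeof() is rounded up to the alignment: one cache line per entry */
	printf("sizeof(struct per_core_state) = %zu\n",
	       sizeof(struct per_core_state));
	return 0;
}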