Age        | Commit message                                                              | Author             | Files | Lines
2019-08-19 | quic: fix rx_callback refactoring                                           | Nathan Skrzypczak  | 1     | -1/+1
2019-08-19 | vppapigen map: raise ValueError when fieldname is python keyword            | Paul Vinciguerra   | 3     | -6/+10
2019-08-19 | dpdk: fix ipsec coverity warning                                            | Matthew Smith      | 1     | -2/+4
2019-08-19 | vlib: fix vlib_buffer_main_init_numa_node memory leak.                      | Guanghua Zhang     | 1     | -3/+8
2019-08-19 | build: fix vpp compilation failure on ThunderX2 and Amp                     | Jianlin Lv         | 1     | -1/+1
2019-08-19 | vppinfra: Update "show cpu" output for AArch64 chips                        | Nitin Saxena       | 1     | -15/+18
2019-08-19 | ip: leverage existing vlib_buffer_advance                                   | Zhiyong Yang       | 1     | -6/+3
2019-08-19 | nsh: rewrite nsh_output_inline                                              | Zhiyong Yang       | 1     | -32/+30
2019-08-19 | rdma: add support for MAC changes                                           | Benoît Ganne       | 2     | -1/+35
2019-08-19 | dpdk: add TSO support in dpdk plugin.                                       | Chenmin Sun        | 3     | -1/+44
2019-08-19 | dpdk: QAT devices update, add c4xxx and xeon d15xx                          | Jim Thompson       | 1     | -1/+2
2019-08-19 | devices: skip checksum calculation if guest supports checksum offload       | Steven Luong       | 1     | -3/+7
2019-08-19 | rdma: add support for input feature arcs                                    | Benoît Ganne       | 1     | -29/+35
2019-08-19 | session: add explicit reset api                                             | Florin Coras       | 6     | -6/+72
2019-08-19 | tcp: set cc_algo on connection alloc                                        | Florin Coras       | 3     | -6/+19
2019-08-19 | tcp: allow cc algos to set pacing rate                                      | Florin Coras       | 2     | -8/+15
2019-08-17 | gso: fix l3 and l4 header offset in case of tagged interface                | Mohsin Kazmi       | 2     | -0/+28
2019-08-17 | gso: fix the segmentation to use current_data offset                        | Mohsin Kazmi       | 1     | -15/+16
2019-08-17 | bonding lacp: deleting virtual interface which was enslaved may cause crash | Steven Luong       | 5     | -46/+66
2019-08-16 | acl: fix stats-segment counters validation on acl update                    | Andrew Yourtchenko | 3     | -26/+82
2019-08-16 | devices: vhost-user crashes displaying show trace for deleted interface     | Steven Luong       | 1     | -6/+10
2019-08-16 | hsa: Refactor quic_echo to allow other protocols                            | Nathan Skrzypczak  | 10    | -2324/+2886
2019-08-16 | tcp: fix cc algo name parsing                                               | Florin Coras       | 1     | -1/+1
2019-08-16 | quic: quic_app_rx_callback refactor.                                        | Mathias Raoul      | 1     | -117/+167
2019-08-16 | gso: remove the ip checksum flag in case of ipv6                            | Mohsin Kazmi       | 2     | -2/+0
2019-08-16 | ipsec: fix missed IPSEC_INTEG_ALG_MD5_96                                    | Dmitry Vakhrushev  | 2     | -3/+8
2019-08-15 | dhcp: resolver process node index in main_t                                 | Dave Barach        | 4     | -5/+3
2019-08-15 | vlib: copy trace_handle in vlib_buffer_copy/clone() functions               | John Lo            | 7     | -19/+3
2019-08-15 | dpdk: ipsec tunnel support for ip6-in-ip4                                   | Matthew G Smith    | 2     | -23/+10
2019-08-15 | stats: refactor header files                                                | Dave Barach        | 5     | -82/+104
2019-08-14 | tcp: extend protocol configuration                                          | Florin Coras       | 5     | -133/+195
2019-08-14 | gbp: add local l3out redirect to local SEP unit test                        | Benoît Ganne       | 1     | -3/+36
2019-08-14 | gbp: do not scan gbp bihash if not instantiated                             | Benoît Ganne       | 1     | -0/+6
2019-08-13 | svm: fix svmtool and svmdbtool segment fault.                               | Guanghua Zhang     | 2     | -0/+4
2019-08-13 | session: fix enqueue notification on 32bit systems                          | Florin Coras       | 1     | -3/+10
2019-08-13 | fib: add adj_midchain.h to the list of files to install in the include path | Alberto Compagno   | 1     | -0/+1
2019-08-12 | papi: Revert vpp-api-python to py2, add py3 pkg                             | Ian Wells          | 1     | -2/+29
2019-08-12 | http_static: fix session expiration timer bugs                              | Dave Barach        | 1     | -33/+52
2019-08-12 | ip: allow addrs from the same prefix on intf                                | Matthew Smith      | 6     | -109/+532
2019-08-12 | tcp: handle fin+rst+syn in closing state                                    | Florin Coras       | 1     | -0/+2
2019-08-12 | http_static: debug spew control, session expiration timers                  | Dave Barach        | 1     | -18/+42
2019-08-10 | http_static: tls support                                                    | Dave Barach        | 4     | -100/+374
2019-08-09 | crypto: add '-maes' compile switch                                          | Benoît Ganne       | 2     | -2/+2
2019-08-09 | quic: handle session migration notifications                                | Aloys Augustin     | 1     | -63/+38
2019-08-09 | vlib: fix vlib_buffer_copy to preserve buffer flags bit                     | John Lo            | 1     | -3/+8
2019-08-09 | session: allow closed transports to send                                    | Florin Coras       | 1     | -4/+16
2019-08-09 | tls: mark as no lookup transport                                            | Florin Coras       | 3     | -15/+27
2019-08-09 | tls: handle transport reset                                                 | Florin Coras       | 1     | -1/+6
2019-08-09 | gbp: add remote EP redirect to remote SEP unit test                         | Benoît Ganne       | 1     | -1/+94
2019-08-09 | gbp: add unknown remote EP redirect unit test                               | Benoît Ganne       | 1     | -6/+27
/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns 0 instead of calling
   os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  mheap_elt_t *e, *n;

  if (offset >= vec_len (heap))
    return 0;

  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);

  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  mheap_put (heap, (u8 *) p - heap);

#if CLIB_DEBUG > 0
  VALGRIND_FREELIKE_BLOCK (p, 0);
#endif
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  return mheap_elt_data_bytes (e);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}

void *clib_mem_init (void *heap, uword size);
void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);
void clib_mem_trace (int enable);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);
u8 *format_clib_mem_usage (u8 * s, va_list * args);
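The allocator entry points above all funnel into clib_mem_alloc_aligned_at_offset and differ only in alignment and in whether failure calls os_out_of_memory(), returns 0, or panics. A minimal usage sketch, assuming a standalone program linked against vppinfra; the 64 MB heap size and the specific allocation sizes are arbitrary choices for illustration:

/* Hypothetical example -- not part of mem.h. */
#include <vppinfra/mem.h>

int
main (int argc, char *argv[])
{
  /* Create the per-thread heap that all clib_mem_* calls allocate from. */
  clib_mem_init (0, 64 << 20);

  /* Plain allocation: may call os_out_of_memory() on failure. */
  u8 *buf = clib_mem_alloc (128);

  /* 64-byte aligned allocation which returns 0 on failure instead. */
  u8 *aligned = clib_mem_alloc_aligned_or_null (256, 64);
  if (!aligned)
    return 1;

  /* clib_mem_realloc emulates realloc via alloc + copy + free,
     so the caller must supply the old size. */
  buf = clib_mem_realloc (buf, 256, 128);

  ASSERT (clib_mem_is_heap_object (buf));

  clib_mem_free (aligned);
  clib_mem_free (buf);
  return 0;
}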
/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, a file
		descriptor will be provided on successful allocation.
		<br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
		numa node preference.
		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
		number of available pages is not sufficient. */
  char *name;			/**< Name for memory allocation, set by caller. */
  uword size;			/**< Allocation size, set by caller. */
  int numa_node;		/**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr;			/**< Pointer to allocated memory, set on successful allocation. */
  int fd;			/**< File descriptor, set on successful allocation if
				   CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size;		/* Page size in log2 format, set on successful allocation. */
  int n_pages;			/* Number of pages. */
} clib_mem_vm_alloc_t;

clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
int clib_mem_vm_get_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
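The extended VM allocator takes its parameters through the clib_mem_vm_alloc_t struct: the caller fills in name, size and flags, and on success clib_mem_vm_ext_alloc fills in addr, fd, log2_page_size and n_pages. A sketch of one plausible use, assuming vppinfra is available; the region name, the 2 MB size and the flag combination are illustrative choices only:

/* Hypothetical example -- not part of mem.h. */
#include <vppinfra/mem.h>
#include <vppinfra/error.h>

static void *
shared_huge_region (void)
{
  clib_mem_vm_alloc_t alloc = { 0 };
  clib_error_t *err;

  alloc.name = "example-region";	/* name chosen for illustration */
  alloc.size = 2 << 20;			/* 2 MB */
  alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;

  err = clib_mem_vm_ext_alloc (&alloc);
  if (err)
    {
      /* e.g. no hugepages configured on this host */
      clib_error_report (err);
      return 0;
    }

  /* On success, addr, fd, log2_page_size and n_pages are filled in;
     fd is valid because CLIB_MEM_VM_F_SHARED was requested. */
  return alloc.addr;
}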