Diffstat (limited to 'libtransport/includes/hicn/transport/utils/fixed_block_allocator.h')
-rw-r--r--  libtransport/includes/hicn/transport/utils/fixed_block_allocator.h  223
1 file changed, 147 insertions(+), 76 deletions(-)
diff --git a/libtransport/includes/hicn/transport/utils/fixed_block_allocator.h b/libtransport/includes/hicn/transport/utils/fixed_block_allocator.h
index 1ade1516e..19b52b37e 100644
--- a/libtransport/includes/hicn/transport/utils/fixed_block_allocator.h
+++ b/libtransport/includes/hicn/transport/utils/fixed_block_allocator.h
@@ -1,108 +1,119 @@
/*
- * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
*/
#pragma once
#include <hicn/transport/portability/c_portability.h>
+#include <hicn/transport/utils/branch_prediction.h>
+#include <hicn/transport/utils/singleton.h>
#include <hicn/transport/utils/spinlock.h>
-
#include <stdint.h>
+
+#include <cassert>
#include <cstdlib>
+#include <list>
#include <memory>
-#include <cassert>
namespace utils {
-template <std::size_t DEFAULT_SIZE = 512, std::size_t OBJECTS = 4096>
-class FixedBlockAllocator {
- FixedBlockAllocator(std::size_t size = DEFAULT_SIZE,
- std::size_t objects = OBJECTS)
- : block_size_(size < sizeof(void*) ? sizeof(long*) : size),
- object_size_(size),
- max_objects_(objects),
- p_head_(NULL),
- pool_index_(0),
- block_count_(0),
- blocks_in_use_(0),
- allocations_(0),
- deallocations_(0) {
- p_pool_ = (uint8_t*)new uint8_t[block_size_ * max_objects_];
- }
+template <std::size_t SIZE = 512, std::size_t OBJECTS = 4096>
+class FixedBlockAllocator
+ : public utils::ThreadLocalSingleton<FixedBlockAllocator<SIZE, OBJECTS>> {
+ friend class utils::ThreadLocalSingleton<FixedBlockAllocator<SIZE, OBJECTS>>;
+
+ static inline const std::size_t BLOCK_SIZE = SIZE;
+ static inline const std::size_t BLOCKS_PER_POOL = OBJECTS;
public:
- static FixedBlockAllocator* getInstance() {
- if (!instance_) {
- instance_ = std::unique_ptr<FixedBlockAllocator>(
- new FixedBlockAllocator(DEFAULT_SIZE, OBJECTS));
+ ~FixedBlockAllocator() {
+ for (auto& p : p_pools_) {
+ delete[] p;
}
-
- return instance_.get();
}
- ~FixedBlockAllocator() { delete[] p_pool_; }
-
- TRANSPORT_ALWAYS_INLINE void* allocateBlock(size_t size = DEFAULT_SIZE) {
- assert(size <= DEFAULT_SIZE);
+ void* allocateBlock() {
uint32_t index;
-
+ SpinLock::Acquire locked(lock_);
void* p_block = pop();
if (!p_block) {
- if (pool_index_ < max_objects_) {
- {
- SpinLock::Acquire locked(lock_);
- index = pool_index_++;
- }
- p_block = (void*)(p_pool_ + (index * block_size_));
- } else {
- // TODO Consider increasing pool here instead of throwing an exception
- throw std::runtime_error("No more memory available from packet pool!");
+ if (TRANSPORT_EXPECT_FALSE(current_pool_index_ >= BLOCKS_PER_POOL)) {
+ // Allocate new memory block
+ p_pools_.emplace_front(
+ new typename std::aligned_storage<SIZE>::type[BLOCKS_PER_POOL]);
+ // reset current_pool_index_
+ current_pool_index_ = 0;
+ // Increase total block count
+ block_count_ += BLOCKS_PER_POOL;
}
- }
- blocks_in_use_++;
- allocations_++;
+ auto& latest = p_pools_.front();
+ index = current_pool_index_++;
+ blocks_in_use_++;
+ allocations_++;
+ p_block = (void*)&latest[index];
+ }
return p_block;
}
- TRANSPORT_ALWAYS_INLINE void deallocateBlock(void* pBlock) {
+ void deallocateBlock(void* pBlock) {
+ SpinLock::Acquire locked(lock_);
push(pBlock);
- {
- SpinLock::Acquire locked(lock_);
- blocks_in_use_--;
- deallocations_++;
- }
+ blocks_in_use_--;
+ deallocations_++;
}
- TRANSPORT_ALWAYS_INLINE std::size_t blockSize() { return block_size_; }
+ public:
+ std::size_t blockSize() { return BLOCK_SIZE; }
+
+ uint32_t blockCount() { return block_count_; }
+
+ uint32_t blocksInUse() { return blocks_in_use_; }
- TRANSPORT_ALWAYS_INLINE uint32_t blockCount() { return block_count_; }
+ uint32_t allocations() { return allocations_; }
- TRANSPORT_ALWAYS_INLINE uint32_t blocksInUse() { return blocks_in_use_; }
+ uint32_t deallocations() { return deallocations_; }
- TRANSPORT_ALWAYS_INLINE uint32_t allocations() { return allocations_; }
+ void reset() {
+ p_head_ = nullptr;
+ blocks_in_use_ = 0;
+ allocations_ = 0;
+ deallocations_ = 0;
+ current_pool_index_ = 0;
+ block_count_ = BLOCKS_PER_POOL;
- TRANSPORT_ALWAYS_INLINE uint32_t deallocations() { return deallocations_; }
+ // Delete all memory pools but the first one
+ for (auto it = std::next(p_pools_.begin()); it != p_pools_.end();) {
+ delete[] * it;
+ it = p_pools_.erase(it);
+ }
+ }
private:
- TRANSPORT_ALWAYS_INLINE void push(void* p_memory) {
+ FixedBlockAllocator()
+ : p_head_(NULL),
+ current_pool_index_(0),
+ block_count_(BLOCKS_PER_POOL),
+ blocks_in_use_(0),
+ allocations_(0),
+ deallocations_(0) {
+ static_assert(SIZE >= sizeof(long*), "SIZE must be at least 8 bytes");
+ p_pools_.emplace_front(
+ new typename std::aligned_storage<SIZE>::type[BLOCKS_PER_POOL]);
+ }
+
+ void push(void* p_memory) {
Block* p_block = (Block*)p_memory;
- {
- SpinLock::Acquire locked(lock_);
- p_block->p_next = p_head_;
- p_head_ = p_block;
- }
+ p_block->p_next = p_head_;
+ p_head_ = p_block;
}
- TRANSPORT_ALWAYS_INLINE void* pop() {
+ void* pop() {
Block* p_block = nullptr;
- {
- SpinLock::Acquire locked(lock_);
- if (p_head_) {
- p_block = p_head_;
- p_head_ = p_head_->p_next;
- }
+ if (p_head_) {
+ p_block = p_head_;
+ p_head_ = p_head_->p_next;
}
return (void*)p_block;
@@ -112,15 +123,9 @@ class FixedBlockAllocator {
Block* p_next;
};
- static std::unique_ptr<FixedBlockAllocator> instance_;
-
- const std::size_t block_size_;
- const std::size_t object_size_;
- const std::size_t max_objects_;
-
Block* p_head_;
- uint8_t* p_pool_;
- uint32_t pool_index_;
+ uint32_t current_pool_index_;
+ std::list<typename std::aligned_storage<SIZE>::type*> p_pools_;
uint32_t block_count_;
uint32_t blocks_in_use_;
uint32_t allocations_;
@@ -129,8 +134,74 @@ class FixedBlockAllocator {
SpinLock lock_;
};
-template <std::size_t A, std::size_t B>
-std::unique_ptr<FixedBlockAllocator<A, B>>
- FixedBlockAllocator<A, B>::instance_ = nullptr;
+/**
+ * STL Allocator trait to be used with allocate_shared.
+ */
+template <typename T, typename Pool>
+class STLAllocator {
+ /**
+ * If STLAllocator is rebound to another type (!= T) via the copy constructor,
+ * we may need to access private members of the source allocator to copy
+ * memory and pool.
+ */
+ template <typename U, typename P>
+ friend class STLAllocator;
+
+ public:
+ using size_type = std::size_t;
+ using difference_type = ptrdiff_t;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using reference = T&;
+ using const_reference = const T&;
+ using value_type = T;
+
+ STLAllocator(pointer memory, Pool* memory_pool)
+ : memory_(memory), pool_(memory_pool) {}
+
+ ~STLAllocator() {}
+
+ template <typename U>
+ STLAllocator(const STLAllocator<U, Pool>& other) {
+ memory_ = other.memory_;
+ pool_ = other.pool_;
+ }
+
+ template <typename U>
+ struct rebind {
+ typedef STLAllocator<U, Pool> other;
+ };
+
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+
+ pointer allocate(size_type n, pointer hint = 0) {
+ return static_cast<pointer>(memory_);
+ }
+
+ void deallocate(pointer p, size_type n) { pool_->deallocateBlock(memory_); }
+
+ template <typename... Args>
+ void construct(pointer p, Args&&... args) {
+ new (static_cast<pointer>(p)) T(std::forward<Args>(args)...);
+ }
+
+ void destroy(pointer p) { p->~T(); }
+
+ private:
+ void* memory_;
+ Pool* pool_;
+};
+
+template <typename T, typename U, typename V>
+inline bool operator==(const STLAllocator<T, V>&, const STLAllocator<U, V>&) {
+ return true;
+}
+
+template <typename T, typename U, typename V>
+inline bool operator!=(const STLAllocator<T, V>& a,
+ const STLAllocator<U, V>& b) {
+ return !(a == b);
+}
} // namespace utils
\ No newline at end of file
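
For context, a minimal usage sketch of the reworked allocator (assuming the
ThreadLocalSingleton base, which is not shown in this diff, exposes a static
getInstance() accessor returning the per-thread instance):

#include <hicn/transport/utils/fixed_block_allocator.h>

void example() {
  // Per-thread pool handing out 4096 blocks of 512 bytes each.
  auto& pool = utils::FixedBlockAllocator<512, 4096>::getInstance();

  // Pops a block from the free list; once the current pool is exhausted,
  // a fresh pool of BLOCKS_PER_POOL blocks is allocated rather than
  // throwing, per the change in allocateBlock() above.
  void* block = pool.allocateBlock();

  // ... use up to 512 bytes at block ...

  // Pushes the block back onto the free list for reuse.
  pool.deallocateBlock(block);
}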
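
Likewise, a sketch of the new STLAllocator trait paired with
std::allocate_shared, as its doc comment suggests. Packet is a hypothetical
payload type used only for illustration:

#include <hicn/transport/utils/fixed_block_allocator.h>

#include <cstdint>
#include <memory>

struct Packet {  // hypothetical payload type, not part of this header
  uint8_t data[256];
};

std::shared_ptr<Packet> makePacket() {
  using Pool = utils::FixedBlockAllocator<512, 4096>;
  auto& pool = Pool::getInstance();

  // Reserve one fixed-size block and hand it to the allocator trait;
  // STLAllocator::allocate() always returns this same block.
  auto* memory = static_cast<Packet*>(pool.allocateBlock());
  utils::STLAllocator<Packet, Pool> allocator(memory, &pool);

  // allocate_shared constructs both the Packet and the shared_ptr control
  // block inside the 512-byte region; deallocateBlock() runs when the last
  // reference goes away.
  return std::allocate_shared<Packet>(allocator);
}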