aboutsummaryrefslogtreecommitdiffstats
path: root/libtransport/includes/hicn/transport/utils/fixed_block_allocator.h
blob: 1ade1516e25d36bf83d4d2db0a4192a9a88322ba (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 */

#pragma once

#include <hicn/transport/portability/c_portability.h>
#include <hicn/transport/utils/spinlock.h>

#include <stdint.h>

#include <cassert>
#include <cstdlib>
#include <memory>
#include <mutex>
#include <stdexcept>

namespace utils {
/**
 * Pool allocator handing out fixed-size blocks carved from one pre-allocated
 * slab of block_size_ * max_objects_ bytes. Freed blocks are kept on an
 * intrusive singly-linked free list and reused before new blocks are carved
 * from the slab. The pool never shrinks and never grows; exhaustion throws.
 *
 * Thread safety: the free list, the slab cursor and all statistics are
 * guarded by an internal spinlock; getInstance() uses std::call_once.
 */
template <std::size_t DEFAULT_SIZE = 512, std::size_t OBJECTS = 4096>
class FixedBlockAllocator {
  // Private: instances are created only through getInstance().
  FixedBlockAllocator(std::size_t size = DEFAULT_SIZE,
                      std::size_t objects = OBJECTS)
      // A free block stores a Block* link inside itself, so each block must
      // be at least pointer-sized (was inconsistently sizeof(long*)).
      : block_size_(size < sizeof(void*) ? sizeof(void*) : size),
        object_size_(size),
        max_objects_(objects),
        p_head_(nullptr),
        pool_index_(0),
        block_count_(0),
        blocks_in_use_(0),
        allocations_(0),
        deallocations_(0) {
    p_pool_ = new uint8_t[block_size_ * max_objects_];
  }

 public:
  /**
   * Return the process-wide singleton for this template specialization.
   * Creation is serialized with std::call_once; the previous
   * check-then-create sequence was racy on concurrent first use.
   */
  static FixedBlockAllocator* getInstance() {
    static std::once_flag once;
    std::call_once(once, []() {
      instance_.reset(new FixedBlockAllocator(DEFAULT_SIZE, OBJECTS));
    });
    return instance_.get();
  }

  ~FixedBlockAllocator() { delete[] p_pool_; }

  // Non-copyable: the allocator owns the slab and hands out raw pointers
  // into it.
  FixedBlockAllocator(const FixedBlockAllocator&) = delete;
  FixedBlockAllocator& operator=(const FixedBlockAllocator&) = delete;

  /**
   * Return a block of at least `size` bytes (size must not exceed
   * DEFAULT_SIZE). A previously freed block is reused when available,
   * otherwise a fresh block is carved from the slab.
   *
   * @throws std::runtime_error when the pool is exhausted.
   */
  TRANSPORT_ALWAYS_INLINE void* allocateBlock(size_t size = DEFAULT_SIZE) {
    assert(size <= DEFAULT_SIZE);

    void* p_block = pop();
    {
      SpinLock::Acquire locked(lock_);
      if (!p_block) {
        // The capacity check and the cursor increment must happen under the
        // same lock: checking outside (as before) let two racing threads
        // both pass with one slot left and index past the end of the slab.
        if (pool_index_ >= max_objects_) {
          // TODO Consider increasing pool here instead of throwing an
          // exception
          throw std::runtime_error(
              "No more memory available from packet pool!");
        }
        p_block = static_cast<void*>(p_pool_ + (pool_index_++ * block_size_));
        // Track blocks carved from the slab; this counter was previously
        // never updated, so blockCount() was stuck at 0.
        block_count_++;
      }
      // Statistics were previously updated without the lock (data race, and
      // inconsistent with deallocateBlock which does lock).
      blocks_in_use_++;
      allocations_++;
    }

    return p_block;
  }

  /**
   * Return a block obtained from allocateBlock() to the free list.
   */
  TRANSPORT_ALWAYS_INLINE void deallocateBlock(void* pBlock) {
    push(pBlock);
    {
      SpinLock::Acquire locked(lock_);
      blocks_in_use_--;
      deallocations_++;
    }
  }

  // Effective per-block size (requested size clamped up to pointer size).
  TRANSPORT_ALWAYS_INLINE std::size_t blockSize() { return block_size_; }

  // Number of blocks ever carved from the slab.
  TRANSPORT_ALWAYS_INLINE uint32_t blockCount() { return block_count_; }

  // Blocks currently handed out and not yet freed.
  TRANSPORT_ALWAYS_INLINE uint32_t blocksInUse() { return blocks_in_use_; }

  // Total successful allocateBlock() calls.
  TRANSPORT_ALWAYS_INLINE uint32_t allocations() { return allocations_; }

  // Total deallocateBlock() calls.
  TRANSPORT_ALWAYS_INLINE uint32_t deallocations() { return deallocations_; }

 private:
  // Push a freed block onto the intrusive free list (LIFO).
  TRANSPORT_ALWAYS_INLINE void push(void* p_memory) {
    Block* p_block = static_cast<Block*>(p_memory);
    {
      SpinLock::Acquire locked(lock_);
      p_block->p_next = p_head_;
      p_head_ = p_block;
    }
  }

  // Pop the most recently freed block, or nullptr if the list is empty.
  TRANSPORT_ALWAYS_INLINE void* pop() {
    Block* p_block = nullptr;

    {
      SpinLock::Acquire locked(lock_);
      if (p_head_) {
        p_block = p_head_;
        p_head_ = p_head_->p_next;
      }
    }

    return static_cast<void*>(p_block);
  }

  // Free-list link, stored inside the (unused) freed block itself.
  struct Block {
    Block* p_next;
  };

  static std::unique_ptr<FixedBlockAllocator> instance_;

  const std::size_t block_size_;
  const std::size_t object_size_;
  const std::size_t max_objects_;

  Block* p_head_;      // head of the free list
  uint8_t* p_pool_;    // the one backing slab
  uint32_t pool_index_;  // next unused slot in the slab
  uint32_t block_count_;
  uint32_t blocks_in_use_;
  uint32_t allocations_;
  uint32_t deallocations_;

  SpinLock lock_;  // guards free list, pool cursor and statistics
};

// Out-of-line storage for the lazily-created singleton; one instance exists
// per (block size, object count) template specialization.
template <std::size_t S, std::size_t N>
std::unique_ptr<FixedBlockAllocator<S, N>>
    FixedBlockAllocator<S, N>::instance_ = nullptr;

}  // namespace utils