/**
 * This file is part of the CernVM File System.
 */

#ifndef CVMFS_MALLOC_ARENA_H_
#define CVMFS_MALLOC_ARENA_H_

#include <inttypes.h>
#include <stdint.h>

#include <cassert>
#include <new>

// TODO(jblomer): the arena size could be a template parameter.  In order to
// reduce code duplication, all functions not requiring the arena size could be
// moved to a base class.

/**
 * An mmap'd block of general purpose memory for malloc/free in sqlite.  Uses
 * the "boundary-tag system" as described in TAOCP vol 1 section 2.5.
 * Not thread-safe.
 *
 * Returned pointers are 8-byte aligned.  Since a reserved block has a 4 byte
 * prefix, all reserved blocks must start at addresses that end with binary
 * 100.  We round up all allocation requests to a multiple of 8.  Reserved
 * blocks are always placed at the upper bound of a free block.  The first
 * block of the arena is appropriately padded and because the upper bound of
 * the initial large free block is at an address that ends on binary 100, all
 * reserved blocks will be aligned correctly.
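 * (Worked example: a reserved block begins at an address ending in binary
 * 100; its 4 byte prefix comes first, so the pointer handed out is at +4,
 * an address ending in binary 000, i.e. 8-byte aligned.)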
 *
 * The tag layouts are as follows.  Size is a signed 4 byte integer, previous
 * and next are 4 byte offsets into the arena, linking to the previous and
 * next free blocks in the list of free blocks.
 *
 * Available (free) block:                   Reserved (used) block:
 *
 *       +--------+--------+                       +--------+--------+
 * upper |  size  |      00|                 upper |        |      01|
 *       +--------+--------+                       +--------+--------+
 *       |  ...            |                       |  ...            |
 *       +-----------------+                       +-----------------+
 *       |previous|        |                       |  ...            |
 *       +-----------------+                       +-----------------+
 * lower |  size  |  next  |                 lower | -size  |        |
 *       +--------+--------+                       +--------+--------+
 *       B0               B7                       B0               B7
 *
 *
 * First block of arena_:                    Last "block" of arena_:
 *   in the free list but blocked for merging
 *
 *       +--------+#########
 * upper |      01| arena  #
 *       +--------+--------+
 *       |previous|        |
 *       +-----------------+               multiple of 8
 *       | int(0) |  next  |  <- head_avail_     |
 *       +-----------------+            +--------+########
 * lower | this [+ pad@32] |      lower |int(-1) |outside#
 *       +--------+--------+            +--------+########
 *       B0               B7            B0       B4
 *
 * The arena size has to be a power of two (GetMallocArena() masks pointers
 * with arena_size - 1 to find the enclosing arena).  It also has to be a
 * multiple of 8 for alignment reasons.  It needs to be <= 512MB.
 */
class MallocArena {
 public:
  /**
   * Avoid very small free blocks.  This must be larger than
   * sizeof(AvailBlockCtl) + sizeof(AvailBlockTag) and a multiple of 8.
   */
  static const int kMinBlockSize = 24;

  /**
   * Returns the MallocArena that houses the memory block ptr points into.
   */
  static inline MallocArena *GetMallocArena(void *ptr, unsigned arena_size) {
    void *arena = reinterpret_cast<void *>(
        uintptr_t(ptr) & ~(uintptr_t(arena_size) - uintptr_t(1)));
    return *reinterpret_cast<MallocArena **>(arena);
  }
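  // Worked example (illustrative values): with arena_size = 2MB = 0x200000,
  // masking ptr = 0x7f5e33480420 with ~0x1fffff yields the arena base
  // 0x7f5e33400000.  The first bytes of the arena hold the address of its
  // MallocArena object, which is what gets returned.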

  explicit MallocArena(unsigned arena_size);
  static MallocArena *CreateInitialized(unsigned arena_size,
                                        unsigned char pattern);
  ~MallocArena();

  void *Malloc(const uint32_t size);
  void Free(void *ptr);
  inline bool Contains(void *ptr) const {
    return GetMallocArena(ptr, arena_size_) == this;
  }
  uint32_t GetSize(void *ptr) const;
  bool IsEmpty() const { return no_reserved_ == 0; }

 private:
  static const char kTagAvail = 0;
  static const char kTagReserved = 1;

  /**
   * Lower boundary of a free block.  Note that the linking of blocks is not
   * necessarily in ascending order but random.
   */
  struct AvailBlockCtl {
    AvailBlockCtl *GetNextPtr(char *base) {
      return reinterpret_cast<AvailBlockCtl *>(base + link_next);
    }
    AvailBlockCtl *GetPrevPtr(char *base) {
      return reinterpret_cast<AvailBlockCtl *>(base + link_prev);
    }
    int32_t ConvertToLink(char *base) {
      return reinterpret_cast<char *>(this) - base;
    }
    void ShrinkTo(int32_t smaller_size) {
      size = smaller_size;
      new (AvailBlockTag::GetTagLocation(this)) AvailBlockTag(smaller_size);
    }
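    // Note: reservations are carved from the upper end of a free block (see
    // the class comment), so shrinking keeps this lower boundary in place
    // and stamps a fresh 8 byte tag at the new upper end.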
    int32_t size;  // always positive
    int32_t link_next;  // offset in the arena; saves 4 bytes on 64bit archs
    int32_t link_prev;  // offset in the arena; saves 4 bytes on 64bit archs
  };
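
  // Example of the offset-based links (illustrative offsets): if the free
  // block at arena_ + 0x1000 is followed in the free list by the block at
  // arena_ + 0x4000, then its link_next holds 0x4000 and GetNextPtr(arena_)
  // returns arena_ + 0x4000.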

  /**
   * 8 bytes upper boundary of a free block.
   */
  struct AvailBlockTag {
    explicit AvailBlockTag(int32_t s) : size(s), tag(kTagAvail) { }
    static void *GetTagLocation(AvailBlockCtl *block) {
      return
        reinterpret_cast<char *>(block) + block->size - sizeof(AvailBlockTag);
    }
    int32_t size;
    char padding[3];
    char tag;
  };
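
  // Note: the members add up to exactly 8 bytes (4 + 3 + 1), matching the
  // 8 byte upper boundary in the diagram above; the tag byte lands in the
  // block's topmost byte (B7).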

  /**
   * Lower boundary of a reserved block: a negative size to distinguish from
   * the lower boundary of a free block.
   */
  struct ReservedBlockCtl {
   public:
    explicit ReservedBlockCtl(int32_t s) : size_(-s) {
      char *base = reinterpret_cast<char *>(this);
      *(base + s - 1) = kTagReserved;
    }
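    // The constructor writes the reserved tag into the block's last byte
    // (base + s - 1), the "01" in the diagram above; boundary tags are
    // presumably what lets Free() decide in O(1) whether an adjacent block
    // may be merged.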
    int32_t size() const { assert(size_ <= 0); return -size_; }

   private:
    int32_t size_;  // always negative
  };

  void UnlinkAvailBlock(AvailBlockCtl *block);
  void EnqueueAvailBlock(AvailBlockCtl *block);
  AvailBlockCtl *FindAvailBlock(const int32_t block_size);
  void *ReserveBlock(AvailBlockCtl *block, int32_t block_size);

  /**
   * Starts with the address of the MallocArena object followed by a
   * head_avail_ block and ends with a -1 guard integer to mimic a reserved
   * end block.  The arena is aligned at a multiple of arena_size.  Therefore,
   * a pointer pointing into it can get the corresponding MallocArena object
   * in O(1).
   */
  char *arena_;  ///< The memory block
  /**
   * Head of the list of available blocks.  Located at the beginning of the
   * arena_.  It is the only "available" block with size 0; its upper tag
   * field is set to "reserved" so that it is never merged with a free'd
   * block.
   */
  AvailBlockCtl *head_avail_;
  AvailBlockCtl *rover_;  ///< The free block where the next search starts.
  uint32_t no_reserved_;  ///< Number of currently reserved blocks
  unsigned arena_size_;
};  // class MallocArena
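
// A minimal usage sketch (illustrative only; the 2MB arena size is an
// assumed value, any power of two <= 512MB works):
//
//   MallocArena M(2 * 1024 * 1024);
//   void *p = M.Malloc(100);            // 8-byte aligned pointer
//   assert(M.Contains(p));
//   assert(M.GetSize(p) >= 100);
//   M.Free(p);
//   assert(M.IsEmpty());                // no reserved blocks remain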

#endif  // CVMFS_MALLOC_ARENA_H_