/**
 * This file is part of the CernVM File System.
 */

#define __STDC_FORMAT_MACROS

#include "cvmfs_config.h"
#include "malloc_arena.h"

#include <cassert>
#include <cstddef>
#include <cstring>
#include <new>

#include "smalloc.h"

using namespace std;  // NOLINT


/**
 * Walks through the free list starting at rover_ and looks for the first block
 * of at least block_size bytes (next-fit search).  Returns NULL if no such
 * block exists.
 */
MallocArena::AvailBlockCtl *MallocArena::FindAvailBlock(
  const int32_t block_size)
{
  bool wrapped = false;
  // Generally: p = LINK(q)
  AvailBlockCtl *q = rover_;
  AvailBlockCtl *p;
  do {
    p = q->GetNextPtr(arena_);
    if (p->size >= block_size) {
      rover_ = p->GetNextPtr(arena_);
      return p;
    }
    if (p == head_avail_) {
      if (wrapped)
        return NULL;
      wrapped = true;
    }
    q = p;
  } while (true);
}


/**
 * Creates a free block in place of the reserved block that ptr points into.
 * The free block might need to be merged with adjacent lower and/or upper
 * blocks.  In these cases, the corresponding blocks are removed from the list
 * of available blocks.  Every allocated block has a predecessor and a
 * successor in the arena.  The newly created free block is added to the end of
 * the list of available blocks.
 */
void MallocArena::Free(void *ptr) {
  assert(Contains(ptr));

  no_reserved_--;

  ReservedBlockCtl *block_ctl = reinterpret_cast<ReservedBlockCtl *>(
      reinterpret_cast<char *>(ptr) - sizeof(ReservedBlockCtl));
  char prior_tag = *(reinterpret_cast<char *>(block_ctl) - 1);
  assert((prior_tag == kTagAvail) || (prior_tag == kTagReserved));

  int32_t new_size = block_ctl->size();
  assert(new_size > 0);
  AvailBlockCtl *new_avail = reinterpret_cast<AvailBlockCtl *>(block_ctl);

  if (prior_tag == kTagAvail) {
    // Merge with block before and remove the block from the list
    int32_t prior_size = reinterpret_cast<AvailBlockTag *>(
        reinterpret_cast<char *>(block_ctl) - sizeof(AvailBlockTag))->size;
    assert(prior_size > 0);
    new_size += prior_size;
    new_avail = reinterpret_cast<AvailBlockCtl *>(
        reinterpret_cast<char *>(block_ctl) - prior_size);
    // new_avail now points to the prior block
    UnlinkAvailBlock(new_avail);
    if (rover_ == new_avail)
      rover_ = head_avail_;
  }

  int32_t succ_size = *reinterpret_cast<int32_t *>(
      reinterpret_cast<char *>(new_avail) + new_size);
  if (succ_size >= 0) {
    // Merge with succeeding block and remove the block from the list
    AvailBlockCtl *succ_avail = reinterpret_cast<AvailBlockCtl *>(
        reinterpret_cast<char *>(new_avail) + new_size);
    UnlinkAvailBlock(succ_avail);
    new_size += succ_size;
    if (rover_ == succ_avail)
      rover_ = head_avail_;
  }

  // Set new free block's boundaries
  new_avail->size = new_size;
  new (AvailBlockTag::GetTagLocation(new_avail)) AvailBlockTag(new_size);

  EnqueueAvailBlock(new_avail);
}


/**
 * Inserts an available block at the end of the free list.
 */
void MallocArena::EnqueueAvailBlock(AvailBlockCtl *block) {
  AvailBlockCtl *next = head_avail_;
  AvailBlockCtl *prev = head_avail_->GetPrevPtr(arena_);
  next->link_prev = block->ConvertToLink(arena_);
  prev->link_next = block->ConvertToLink(arena_);
  block->link_next = head_avail_->ConvertToLink(arena_);
  block->link_prev = prev->ConvertToLink(arena_);
}


/**
 * The ptr points to the result of Malloc().  The size of the area is stored
 * a few bytes before ptr.
 */
uint32_t MallocArena::GetSize(void *ptr) const {
  assert(Contains(ptr));

  ReservedBlockCtl *block_ctl = reinterpret_cast<ReservedBlockCtl *>(
      reinterpret_cast<char *>(ptr) - sizeof(ReservedBlockCtl));
  int32_t size = block_ctl->size();
  assert(size > 1);
  return size - sizeof(ReservedBlockCtl) - 1;
}


/**
 * Walks the list of available blocks starting from rover and allocates the
 * first available spot that's large enough.  Puts the reserved block at the
 * end of the available one and, if necessary, removes the available one from
 * the list of free blocks.
 */
void *MallocArena::Malloc(const uint32_t size) {
  assert(size > 0);

  // Control word first, block type tag last
  int32_t total_size = sizeof(ReservedBlockCtl) + size + 1;
  total_size = RoundUp8(total_size);
  if (total_size < kMinBlockSize)
    total_size = kMinBlockSize;

  AvailBlockCtl *p = FindAvailBlock(total_size);
  if (p == NULL)
    return NULL;

  no_reserved_++;
  return ReserveBlock(p, total_size);
}


/**
 * The arena starts with a pointer to this followed by the AvailBlockCtl of
 * head_avail_, followed by a reserved tag to prevent it from being merged,
 * followed by a free block spanning the arena until the end tag.  The end tag
 * is a single negative int, which mimics another reserved block.
 */
MallocArena::MallocArena(unsigned arena_size)
  : arena_(reinterpret_cast<char *>(sxmmap_align(arena_size)))
  , head_avail_(reinterpret_cast<AvailBlockCtl *>(arena_ + sizeof(uint64_t)))
  , rover_(head_avail_)
  , no_reserved_(0)
  , arena_size_(arena_size)
{
  assert(arena_size_ > 0);
  assert((arena_size_ % (2 * 1024 * 1024)) == 0);  // Multiple of 2MB
  assert(arena_size_ <= (512 * 1024 * 1024));  // <= 512MB

  const unsigned char padding = 7;
  // Size of the initial free block: everything minus arena boundaries
  int32_t usable_size = arena_size_ -
    (sizeof(uint64_t) + sizeof(AvailBlockCtl) + padding + 1 + sizeof(int32_t));
  assert((usable_size % 8) == 0);

  // First 8 bytes of arena: this pointer (occupies only 4 bytes on 32bit
  // architectures, in which case the second 4 bytes are unused.)
  *reinterpret_cast<MallocArena **>(arena_) = this;

  // The initial large free block
  AvailBlockCtl *free_block =
    new (arena_ + sizeof(uint64_t) + sizeof(AvailBlockCtl) + padding + 1)
      AvailBlockCtl();
  free_block->size = usable_size;
  free_block->link_next = free_block->link_prev =
    head_avail_->ConvertToLink(arena_);
  new (AvailBlockTag::GetTagLocation(free_block)) AvailBlockTag(usable_size);

  head_avail_->size = 0;
  head_avail_->link_next = head_avail_->link_prev =
    free_block->ConvertToLink(arena_);

  // Prevent succeeding blocks from merging
  *(reinterpret_cast<char *>(free_block) - 1) = kTagReserved;
  // Final tag: reserved block marker
  *reinterpret_cast<int32_t *>(arena_ + arena_size_ - sizeof(int32_t)) = -1;
}
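
// Illustrative sketch of the arena layout established by the constructor
// above, derived from the code; widths are not to scale and the exact sizes
// of AvailBlockCtl and AvailBlockTag are defined in malloc_arena.h:
//
//   arena_
//   v
//   +-----------+----------------+------------------+---------------------+--------+
//   | this ptr  | head_avail_    | 7 padding bytes  | initial free block  | end    |
//   | (8 bytes) | AvailBlockCtl  | + 1 kTagReserved | (ctl ... data ...   | tag:   |
//   |           | (size == 0)    | boundary byte    |  AvailBlockTag)     | -1     |
//   +-----------+----------------+------------------+---------------------+--------+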


/**
 * Initializes the arena with repeated copies of the given pattern.  Used for
 * testing.
 */
MallocArena *MallocArena::CreateInitialized(
  unsigned arena_size,
  unsigned char pattern)
{
  MallocArena *result = new MallocArena(arena_size);
  // At this point, there is one big free block linked to by head_avail_
  AvailBlockCtl *free_block = result->head_avail_->GetNextPtr(result->arena_);
  assert(free_block != result->head_avail_);
  assert(free_block->size > 0);
  // Strip control information at both ends of the block
  int usable_size = free_block->size -
    (sizeof(AvailBlockCtl) + sizeof(AvailBlockTag));
  assert(usable_size > 0);
  memset(free_block + 1, pattern, usable_size);
  return result;
}


MallocArena::~MallocArena() {
  sxunmap(arena_, arena_size_);
}
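
// Minimal usage sketch (illustrative only, not part of this file's interface;
// the 2 MB arena size and the 100-byte request are arbitrary examples that
// satisfy the constructor's size constraints):
//
//   MallocArena arena(2 * 1024 * 1024);
//   void *p = arena.Malloc(100);
//   if (p != NULL) {
//     assert(arena.GetSize(p) >= 100);  // rounded up to the reserved block
//     arena.Free(p);
//   }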


/**
 * Given the free block "block", cuts out a new reserved block of size
 * block_size at the end of the free block.  Returns a pointer usable by the
 * application.
 */
void *MallocArena::ReserveBlock(
  AvailBlockCtl *block,
  int32_t block_size)
{
  assert(block->size >= block_size);

  int32_t remaining_size = block->size - block_size;
  // Avoid creation of very small blocks
  if (remaining_size < kMinBlockSize) {
    block_size += remaining_size;
    remaining_size = 0;
  }

  // Update the list of available blocks
  if (remaining_size == 0) {
    // Remove free block p from the list of available blocks
    UnlinkAvailBlock(block);
  } else {
    block->ShrinkTo(remaining_size);
  }

  // Place the new allocation, which also sets the block type tag at the end
  char *new_block = reinterpret_cast<char *>(block) + remaining_size;
  new (new_block) ReservedBlockCtl(block_size);
  return new_block + sizeof(ReservedBlockCtl);
}


/**
 * Removes the given block from the doubly linked free block list.  This
 * happens when two adjacent free blocks are created in Free() and then merged,
 * or when a block gets fully used in Malloc().
 */
void MallocArena::UnlinkAvailBlock(AvailBlockCtl *block) {
  AvailBlockCtl *next = block->GetNextPtr(arena_);
  AvailBlockCtl *prev = block->GetPrevPtr(arena_);
  prev->link_next = block->link_next;
  next->link_prev = block->link_prev;
}