GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/malloc_arena.cc
Date: 2024-04-21 02:33:16
              Exec   Total   Coverage
Lines:         123     123     100.0%
Branches:       39      54      72.2%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 */
4
5 #define __STDC_FORMAT_MACROS
6
7 #include "cvmfs_config.h"
8 #include "malloc_arena.h"
9
10 #include <cassert>
11 #include <cstddef>
12 #include <cstring>
13 #include <new>
14
15 #include "util/smalloc.h"
16
17 using namespace std; // NOLINT
18
19
20 /**
21 * Walks through the free list starting at rover_ and looks for the first
22 * block at least as large as block_size. Returns NULL if no such block exists.
23 */
24 6997269 MallocArena::AvailBlockCtl *MallocArena::FindAvailBlock(
25 const int32_t block_size)
26 {
27 6997269 bool wrapped = false;
28 // Generally: p = LINK(q)
29 6997269 AvailBlockCtl *q = rover_;
30 AvailBlockCtl *p;
31 do {
32 2408052462 p = q->GetNextPtr(arena_);
33
2/2
✓ Branch 0 taken 5280402 times.
✓ Branch 1 taken 2402772060 times.
2408052462 if (p->size >= block_size) {
34 5280402 rover_ = p->GetNextPtr(arena_);
35 5280402 return p;
36 }
37
2/2
✓ Branch 0 taken 3710932 times.
✓ Branch 1 taken 2399061128 times.
2402772060 if (p == head_avail_) {
38
2/2
✓ Branch 0 taken 1716867 times.
✓ Branch 1 taken 1994065 times.
3710932 if (wrapped)
39 1716867 return NULL;
40 1994065 wrapped = true;
41 }
42 2401055193 q = p;
43 } while (true);
44 }
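
The loop above implements a next-fit policy: rover_ remembers where the last
search ended, and the search gives up once it has passed the sentinel head a
second time. A minimal, self-contained sketch of the same policy with plain
pointers (FreeBlock and NextFit are illustrative names, not part of
malloc_arena.h):

    #include <cstdint>

    struct FreeBlock {
      int32_t size;
      FreeBlock *next;  // a circular singly linked list suffices here
    };

    // Returns the first block with size >= want, starting at *rover, and
    // advances *rover past the hit so the next search resumes there.
    // Returns NULL if the search passes head twice without a hit.
    FreeBlock *NextFit(FreeBlock *head, FreeBlock **rover, int32_t want) {
      bool wrapped = false;
      FreeBlock *q = *rover;
      do {
        FreeBlock *p = q->next;
        if (p->size >= want) {
          *rover = p->next;
          return p;
        }
        if (p == head) {  // head is a sentinel block of size 0
          if (wrapped) return NULL;
          wrapped = true;
        }
        q = p;
      } while (true);
    }
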
45
46
47 /**
48 * Creates a free block in place of the reserved block that ptr points into.
49 * The free block might need to be merged with adjacent lower and/or upper
50 * blocks. In these cases, the corresponding blocks are removed from the list
51 * of available blocks. Every allocated block has a predecessor and a
52 * successor in the arena. The newly created free block is added to the end of
53 * the list of available blocks.
54 */
55 5225969 void MallocArena::Free(void *ptr) {
56
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 5225969 times.
5225969 assert(Contains(ptr));
57
58 5225969 no_reserved_--;
59
60 5225969 ReservedBlockCtl *block_ctl = reinterpret_cast<ReservedBlockCtl *>(
61 reinterpret_cast<char *>(ptr) - sizeof(ReservedBlockCtl));
62 5225969 char prior_tag = *(reinterpret_cast<char *>(block_ctl) - 1);
63
3/4
✓ Branch 0 taken 3284059 times.
✓ Branch 1 taken 1941910 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 3284059 times.
5225969 assert((prior_tag == kTagAvail) || (prior_tag == kTagReserved));
64
65 5225969 int32_t new_size = block_ctl->size();
66
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5225969 times.
5225969 assert(new_size > 0);
67 5225969 AvailBlockCtl *new_avail = reinterpret_cast<AvailBlockCtl *>(block_ctl);
68
69
2/2
✓ Branch 0 taken 1941910 times.
✓ Branch 1 taken 3284059 times.
5225969 if (prior_tag == kTagAvail) {
70 // Merge with block before and remove the block from the list
71 1941910 int32_t prior_size = reinterpret_cast<AvailBlockTag *>(
72 reinterpret_cast<char *>(block_ctl) - sizeof(AvailBlockTag))->size;
73
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1941910 times.
1941910 assert(prior_size > 0);
74 1941910 new_size += prior_size;
75 1941910 new_avail = reinterpret_cast<AvailBlockCtl *>(
76 1941910 reinterpret_cast<char *>(block_ctl) - prior_size);
77 // new_avail points now to the prior block
78 1941910 UnlinkAvailBlock(new_avail);
79
2/2
✓ Branch 0 taken 22487 times.
✓ Branch 1 taken 1919423 times.
1941910 if (rover_ == new_avail)
80 22487 rover_ = head_avail_;
81 }
82
83 5225969 int32_t succ_size = *reinterpret_cast<int32_t *>(
84 5225969 reinterpret_cast<char *>(new_avail) + new_size);
85
2/2
✓ Branch 0 taken 2843539 times.
✓ Branch 1 taken 2382430 times.
5225969 if (succ_size >= 0) {
86 // Merge with succeeding block and remove the block from the list
87 2843539 AvailBlockCtl *succ_avail = reinterpret_cast<AvailBlockCtl *>(
88 2843539 reinterpret_cast<char *>(new_avail) + new_size);
89 2843539 UnlinkAvailBlock(succ_avail);
90 2843539 new_size += succ_size;
91
2/2
✓ Branch 0 taken 37199 times.
✓ Branch 1 taken 2806340 times.
2843539 if (rover_ == succ_avail)
92 37199 rover_ = head_avail_;
93 }
94
95 // Set new free block's boundaries
96 5225969 new_avail->size = new_size;
97 5225969 new (AvailBlockTag::GetTagLocation(new_avail)) AvailBlockTag(new_size);
98
99 5225969 EnqueueAvailBlock(new_avail);
100 5225969 }
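
Free() can coalesce in O(1) because of boundary tags: every block's last byte
is a type tag, and a free block additionally stores its size in a trailing
AvailBlockTag. A sketch of the two neighbor probes with simplified stand-in
types (the real structs and tag values live in malloc_arena.h):

    #include <cstdint>

    struct AvailBlockTag { int32_t size; char tag; };  // simplified stand-in

    const char kTagAvail = 0;     // illustrative values only
    const char kTagReserved = 1;

    // Lower neighbor: its type tag is the byte just below our header; if it
    // is free, its trailing AvailBlockTag says how far down it starts.
    bool LowerNeighborIsFree(char *block) {
      return *(block - 1) == kTagAvail;
    }
    int32_t LowerNeighborSize(char *block) {
      return reinterpret_cast<AvailBlockTag *>(
          block - sizeof(AvailBlockTag))->size;
    }

    // Upper neighbor: its first word is a size, non-negative only for free
    // blocks (the arena's end tag of -1 poses as a reserved block).
    bool UpperNeighborIsFree(char *block, int32_t size) {
      return *reinterpret_cast<int32_t *>(block + size) >= 0;
    }
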
101
102
103 /**
104 * Inserts an available block at the end of the free list.
105 */
106 5225969 void MallocArena::EnqueueAvailBlock(AvailBlockCtl *block) {
107 5225969 AvailBlockCtl *next = head_avail_;
108 5225969 AvailBlockCtl *prev = head_avail_->GetPrevPtr(arena_);
109 5225969 next->link_prev = block->ConvertToLink(arena_);
110 5225969 prev->link_next = block->ConvertToLink(arena_);
111 5225969 block->link_next = head_avail_->ConvertToLink(arena_);
112 5225969 block->link_prev = prev->ConvertToLink(arena_);
113 5225969 }
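
The list links are not raw pointers: link_next and link_prev are 32-bit
offsets relative to the arena base (hence ConvertToLink() and GetNextPtr()),
which keeps the control structures small (and, presumably, the arena
position-independent). With plain pointers, the same tail insertion reads as
(Node is an illustrative type):

    struct Node { Node *next; Node *prev; };

    // Insert block immediately before head, i.e. at the tail of the
    // circular list, mirroring EnqueueAvailBlock() above.
    void EnqueueBeforeHead(Node *head, Node *block) {
      Node *prev = head->prev;
      prev->next = block;
      head->prev = block;
      block->next = head;
      block->prev = prev;
    }
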
114
115
116 /**
117 * ptr points to the result of Malloc(). The size of the area is stored
118 * a few bytes before ptr.
119 */
120 8858077 uint32_t MallocArena::GetSize(void *ptr) const {
121
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 8858077 times.
8858077 assert(Contains(ptr));
122
123 8858077 ReservedBlockCtl *block_ctl = reinterpret_cast<ReservedBlockCtl *>(
124 reinterpret_cast<char *>(ptr) - sizeof(ReservedBlockCtl));
125 8858077 int32_t size = block_ctl->size();
126
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 8858077 times.
8858077 assert(size > 1);
127 8858077 return size - sizeof(ReservedBlockCtl) - 1;
128 }
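
The same header-before-pointer idiom, reduced to its essentials in a
hypothetical wrapper (SizedAlloc/SizedGetSize are not cvmfs functions): the
allocator writes the size just below the address it hands out, so the size
can later be recovered from the pointer alone.

    #include <cstdint>
    #include <cstdlib>

    void *SizedAlloc(uint32_t size) {
      // An 8-byte header (rather than 4) keeps the user pointer 8-byte
      // aligned.
      char *raw = static_cast<char *>(malloc(sizeof(int64_t) + size));
      if (raw == NULL) return NULL;
      *reinterpret_cast<int64_t *>(raw) = size;
      return raw + sizeof(int64_t);
    }

    uint32_t SizedGetSize(void *ptr) {
      return static_cast<uint32_t>(*reinterpret_cast<int64_t *>(
          static_cast<char *>(ptr) - sizeof(int64_t)));
    }
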
129
130
131 /**
132 * Walks the list of available blocks starting from rover and allocates the
133 * first available spot that's large enough. Puts the reserved block at the end
134 * of the available one and, if the available block is fully consumed, removes
135 * it from the list of free blocks.
136 */
137 6997269 void *MallocArena::Malloc(const uint32_t size) {
138
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6997269 times.
6997269 assert(size > 0);
139
140 // Control word first, block type tag last
141 6997269 int32_t total_size = sizeof(ReservedBlockCtl) + size + 1;
142 6997269 total_size = RoundUp8(total_size);
143
2/2
✓ Branch 0 taken 29983 times.
✓ Branch 1 taken 6967286 times.
6997269 if (total_size < kMinBlockSize)
144 29983 total_size = kMinBlockSize;
145
146 6997269 AvailBlockCtl *p = FindAvailBlock(total_size);
147
2/2
✓ Branch 0 taken 1716867 times.
✓ Branch 1 taken 5280402 times.
6997269 if (p == NULL)
148 1716867 return NULL;
149
150 5280402 no_reserved_++;
151 5280402 return ReserveBlock(p, total_size);
152 }
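
Each reservation occupies the requested bytes plus one ReservedBlockCtl in
front and one tag byte at the end, rounded up to a multiple of 8. Assuming
RoundUp8() is the usual bit trick (an assumption; its definition is not shown
here), the arithmetic is:

    #include <cassert>
    #include <cstdint>

    inline int32_t RoundUp8(int32_t x) { return (x + 7) & ~7; }

    int main() {
      assert(RoundUp8(12) == 16);
      assert(RoundUp8(16) == 16);  // already aligned sizes are unchanged
      // Assuming an 8-byte ReservedBlockCtl, a 3-byte request occupies
      // RoundUp8(8 + 3 + 1) == 16 bytes of arena.
      return 0;
    }
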
153
154
155 /**
156 * The arena starts with a pointer to this followed by the AvailBlockCtl of
157 * head_avail_, followed by a reserved tag to prevent it from being merged,
158 * followed by a free block spanning the arena until the end tag. The end tag
159 * is a single negative int, which mimics another reserved block.
160 */
161 170 MallocArena::MallocArena(unsigned arena_size)
162 170 : arena_(reinterpret_cast<char *>(sxmmap_align(arena_size)))
163 170 , head_avail_(reinterpret_cast<AvailBlockCtl *>(arena_ + sizeof(uint64_t)))
164 170 , rover_(head_avail_)
165 170 , no_reserved_(0)
166 170 , arena_size_(arena_size)
167 {
168
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 170 times.
170 assert(arena_size_ > 0);
169
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 170 times.
170 assert((arena_size_ % (2 * 1024 * 1024)) == 0); // Multiple of 2MB
170
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 170 times.
170 assert(arena_size_ <= (512 * 1024 * 1024)); // <= 512MB
171
172 170 const unsigned char padding = 7;
173 // Size of the initial free block: everything minus arena boundaries
174 170 int32_t usable_size = arena_size_ -
175 (sizeof(uint64_t) + sizeof(AvailBlockCtl) + padding + 1 + sizeof(int32_t));
176
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 170 times.
170 assert((usable_size % 8) == 0);
177
178 // First 8 bytes of arena: this pointer (occupies only 4 bytes on 32bit
179 // architectures, in which case the second 4 bytes are unused.)
180 170 *reinterpret_cast<MallocArena **>(arena_) = this;
181
182 // The initial large free block
183 AvailBlockCtl *free_block =
184 170 new (arena_ + sizeof(uint64_t) + sizeof(AvailBlockCtl) + padding + 1)
185 170 AvailBlockCtl();
186 170 free_block->size = usable_size;
187 170 free_block->link_next = free_block->link_prev =
188 170 head_avail_->ConvertToLink(arena_);
189 170 new (AvailBlockTag::GetTagLocation(free_block)) AvailBlockTag(usable_size);
190
191 170 head_avail_->size = 0;
192 340 head_avail_->link_next = head_avail_->link_prev =
193 170 free_block->ConvertToLink(arena_);
194
195 // Prevent succeeding blocks from merging
196 170 *(reinterpret_cast<char *>(free_block) - 1) = kTagReserved;
197 // Final tag: reserved block marker
198 170 *reinterpret_cast<int32_t *>(arena_ + arena_size_ - sizeof(int32_t)) = -1;
199 170 }
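
Putting numbers to the layout: a sketch of the usable_size computation for a
single 2MB arena, assuming AvailBlockCtl packs three int32_t fields (size,
link_next, link_prev) for 12 bytes; the authoritative sizes are in
malloc_arena.h.

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned arena_size = 2 * 1024 * 1024;
      const unsigned sizeof_avail_block_ctl = 12;  // assumption, see above
      const unsigned padding = 7;
      const int32_t usable_size = static_cast<int32_t>(
          arena_size - (sizeof(uint64_t) + sizeof_avail_block_ctl
                        + padding + 1 + sizeof(int32_t)));
      assert((usable_size % 8) == 0);  // mirrors the constructor's assert
      return 0;
    }
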
200
201
202 /**
203 * Initializes the arena with repeated copies of the given pattern. Used for
204 * testing.
205 */
206 1 MallocArena *MallocArena::CreateInitialized(
207 unsigned arena_size,
208 unsigned char pattern)
209 {
210 1 MallocArena *result = new MallocArena(arena_size);
211 // At this point, there is one big free block linked to by head_avail_
212 1 AvailBlockCtl *free_block = result->head_avail_->GetNextPtr(result->arena_);
213
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
1 assert(free_block != result->head_avail_);
214
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
1 assert(free_block->size > 0);
215 // Strip control information at both ends of the block
216 1 int usable_size = free_block->size -
217 (sizeof(AvailBlockCtl) + sizeof(AvailBlockTag));
218
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
1 assert(usable_size > 0);
219 1 memset(free_block + 1, pattern, usable_size);
220 1 return result;
221 }
222
223
224 167 MallocArena::~MallocArena() {
225 167 sxunmap(arena_, arena_size_);
226 167 }
227
228
229 /**
230 * Given the free block "block", cuts out a new reserved block of size
231 * block_size at the end of the free block. Returns a pointer usable by the
232 * application.
233 */
234 5280402 void *MallocArena::ReserveBlock(
235 AvailBlockCtl *block,
236 int32_t block_size)
237 {
238
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5280402 times.
5280402 assert(block->size >= block_size);
239
240 5280402 int32_t remaining_size = block->size - block_size;
241 // Avoid creation of very small blocks
242
2/2
✓ Branch 0 taken 423975 times.
✓ Branch 1 taken 4856427 times.
5280402 if (remaining_size < kMinBlockSize) {
243 423975 block_size += remaining_size;
244 423975 remaining_size = 0;
245 }
246
247 // Update the list of available blocks
248
2/2
✓ Branch 0 taken 423975 times.
✓ Branch 1 taken 4856427 times.
5280402 if (remaining_size == 0) {
249 // Remove free block p from the list of available blocks
250 423975 UnlinkAvailBlock(block);
251 } else {
252 4856427 block->ShrinkTo(remaining_size);
253 }
254
255 // Place the new allocation, which also sets the block type tag at the end
256 5280402 char *new_block = reinterpret_cast<char *>(block) + remaining_size;
257 5280402 new (new_block) ReservedBlockCtl(block_size);
258 5280402 return new_block + sizeof(ReservedBlockCtl);
259 }
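
Carving from the end is what keeps the split cheap: the free block's header
and list links stay where they are, and only its size bookkeeping needs
updating, presumably ShrinkTo()'s job here. A stand-alone sketch of the split
arithmetic (SplitFromEnd and Split are illustrative names):

    #include <cstdint>

    struct Split {
      int32_t remaining;  // bytes left in the free block (0 if consumed)
      int32_t reserved;   // bytes actually handed to the reservation
    };

    Split SplitFromEnd(int32_t free_size, int32_t want,
                       int32_t min_block_size) {
      int32_t remaining = free_size - want;
      if (remaining < min_block_size) {
        // A remainder too small to hold the free-list control structures
        // is folded into the reservation instead of left as a sliver.
        want += remaining;
        remaining = 0;
      }
      Split result = {remaining, want};
      return result;
    }
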
260
261
262 /**
263 * Removes the given block from the doubly linked free block list. This happens
264 * when a freed block is merged with an adjacent free block in Free(), or when
265 * a block gets fully used in Malloc().
266 */
267 5209424 void MallocArena::UnlinkAvailBlock(AvailBlockCtl *block) {
268 5209424 AvailBlockCtl *next = block->GetNextPtr(arena_);
269 5209424 AvailBlockCtl *prev = block->GetPrevPtr(arena_);
270 5209424 prev->link_next = block->link_next;
271 5209424 next->link_prev = block->link_prev;
272 5209424 }
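
With plain pointers instead of arena offsets, the unlink is the classic
doubly linked list removal (same illustrative Node type as in the enqueue
sketch above); block's own links are left dangling, just as in
UnlinkAvailBlock():

    struct Node { Node *next; Node *prev; };

    void Unlink(Node *block) {
      block->prev->next = block->next;
      block->next->prev = block->prev;
    }
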
273