GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/malloc_arena.cc
Date: 2025-06-29 02:35:41
              Exec   Total   Coverage
Lines:         121     121     100.0%
Branches:       39      54      72.2%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 */
4
5 #define __STDC_FORMAT_MACROS
6
7
8 #include "malloc_arena.h"
9
10 #include <cassert>
11 #include <cstddef>
12 #include <cstring>
13 #include <new>
14
15 #include "util/smalloc.h"
16
17 using namespace std; // NOLINT
18
19
20 /**
21 * Walks through the free list starting at rover_ and looks for the first
22 * block of at least block_size bytes. Returns NULL if no such block exists.
23 */
24 314961577 MallocArena::AvailBlockCtl *MallocArena::FindAvailBlock(
25 const int32_t block_size) {
26 314961577 bool wrapped = false;
27 // Generally: p = LINK(q)
28 314961577 AvailBlockCtl *q = rover_;
29 AvailBlockCtl *p;
30 do {
31 109549732055 p = q->GetNextPtr(arena_);
32
2/2
✓ Branch 0 taken 236026281 times.
✓ Branch 1 taken 109313705774 times.
109549732055 if (p->size >= block_size) {
33 236026281 rover_ = p->GetNextPtr(arena_);
34 236026281 return p;
35 }
36
2/2
✓ Branch 0 taken 169527346 times.
✓ Branch 1 taken 109144178428 times.
109313705774 if (p == head_avail_) {
37
2/2
✓ Branch 0 taken 78935296 times.
✓ Branch 1 taken 90592050 times.
169527346 if (wrapped)
38 78935296 return NULL;
39 90592050 wrapped = true;
40 }
41 109234770478 q = p;
42 } while (true);
43 }
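The loop above is a classic next-fit scan (Knuth's "rover") over a circular free list: the search resumes where the previous one succeeded, spreading allocations across the arena. A minimal self-contained sketch of the same logic, assuming plain pointers instead of the arena-relative offsets AvailBlockCtl uses:

    #include <cstddef>
    #include <stdint.h>

    // Simplified free-list node; the real AvailBlockCtl stores 32-bit links
    // relative to the arena base instead of raw pointers.
    struct FreeNode {
      int32_t size;   // > 0 for free blocks, 0 for the head sentinel
      FreeNode *next;
    };

    // Next-fit: resume at *rover, return the first block of at least
    // block_size bytes, or NULL after one full lap around the list.
    FreeNode *FindNextFit(FreeNode **rover, FreeNode *head, int32_t block_size) {
      bool wrapped = false;
      FreeNode *q = *rover;
      do {
        FreeNode *p = q->next;
        if (p->size >= block_size) {
          *rover = p->next;  // the next search continues after the hit
          return p;
        }
        if (p == head) {  // passed the sentinel: one lap completed?
          if (wrapped)
            return NULL;
          wrapped = true;
        }
        q = p;
      } while (true);
    }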
44
45
46 /**
47 * Creates a free block in place of the reserved block that ptr points into.
48 * The free block might need to be merged with adjacent lower and/or upper
49 * blocks. In these cases, the corresponding blocks are removed from the list
50 * of available blocks. Every allocated block has a predecessor and a
51 * successor in the arena. The newly created free block is added to the end of
52 * the list of available blocks.
53 */
54 234025724 void MallocArena::Free(void *ptr) {
55
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 234025724 times.
234025724 assert(Contains(ptr));
56
57 234025724 no_reserved_--;
58
59 234025724 ReservedBlockCtl *block_ctl = reinterpret_cast<ReservedBlockCtl *>(
60 reinterpret_cast<char *>(ptr) - sizeof(ReservedBlockCtl));
61 234025724 const char prior_tag = *(reinterpret_cast<char *>(block_ctl) - 1);
62
3/4
✓ Branch 0 taken 147124248 times.
✓ Branch 1 taken 86901476 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 147124248 times.
234025724 assert((prior_tag == kTagAvail) || (prior_tag == kTagReserved));
63
64 234025724 int32_t new_size = block_ctl->size();
65
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 234025724 times.
234025724 assert(new_size > 0);
66 234025724 AvailBlockCtl *new_avail = reinterpret_cast<AvailBlockCtl *>(block_ctl);
67
68
2/2
✓ Branch 0 taken 86901476 times.
✓ Branch 1 taken 147124248 times.
234025724 if (prior_tag == kTagAvail) {
69 // Merge with block before and remove the block from the list
70 86901476 const int32_t prior_size = reinterpret_cast<AvailBlockTag *>(
71 reinterpret_cast<char *>(block_ctl)
72 - sizeof(AvailBlockTag))
73 ->size;
74
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 86901476 times.
86901476 assert(prior_size > 0);
75 86901476 new_size += prior_size;
76 86901476 new_avail = reinterpret_cast<AvailBlockCtl *>(
77 86901476 reinterpret_cast<char *>(block_ctl) - prior_size);
78 // new_avail points now to the prior block
79 86901476 UnlinkAvailBlock(new_avail);
80
2/2
✓ Branch 0 taken 1015377 times.
✓ Branch 1 taken 85886099 times.
86901476 if (rover_ == new_avail)
81 1015377 rover_ = head_avail_;
82 }
83
84 234025724 const int32_t succ_size = *reinterpret_cast<int32_t *>(
85 234025724 reinterpret_cast<char *>(new_avail) + new_size);
86
2/2
✓ Branch 0 taken 128700909 times.
✓ Branch 1 taken 105324815 times.
234025724 if (succ_size >= 0) {
87 // Merge with succeeding block and remove the block from the list
88 128700909 AvailBlockCtl *succ_avail = reinterpret_cast<AvailBlockCtl *>(
89 128700909 reinterpret_cast<char *>(new_avail) + new_size);
90 128700909 UnlinkAvailBlock(succ_avail);
91 128700909 new_size += succ_size;
92
2/2
✓ Branch 0 taken 1685472 times.
✓ Branch 1 taken 127015437 times.
128700909 if (rover_ == succ_avail)
93 1685472 rover_ = head_avail_;
94 }
95
96 // Set new free block's boundaries
97 234025724 new_avail->size = new_size;
98 234025724 new (AvailBlockTag::GetTagLocation(new_avail)) AvailBlockTag(new_size);
99
100 234025724 EnqueueAvailBlock(new_avail);
101 234025724 }
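The two O(1) neighbor probes above (prior_tag and succ_size) are the boundary-tag technique: every block announces its type, and free blocks their size, at both ends, so coalescing never has to walk a list. A simplified sketch of the two probes, assuming a reduced convention in which headers and trailing tags both hold a signed size (positive for free, negative for reserved, matching the -1 end tag):

    #include <cstddef>
    #include <cstring>
    #include <stdint.h>

    // Probe the trailing tag of the block just below offset `off`.
    // memcpy avoids alignment assumptions on the raw arena bytes.
    int32_t PriorBlockSize(const char *arena, size_t off) {
      int32_t tag;
      std::memcpy(&tag, arena + off - sizeof(tag), sizeof(tag));
      return tag;  // > 0: a free block of that size ends right below us
    }

    // Probe the header of the block just above a block of my_size bytes.
    int32_t SuccBlockSize(const char *arena, size_t off, int32_t my_size) {
      int32_t hdr;
      std::memcpy(&hdr, arena + off + my_size, sizeof(hdr));
      return hdr;  // > 0: a free block starts right above us
    }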
102
103
104 /**
105 * Inserts an available block at the end of the free list.
106 */
107 234025724 void MallocArena::EnqueueAvailBlock(AvailBlockCtl *block) {
108 234025724 AvailBlockCtl *next = head_avail_;
109 234025724 AvailBlockCtl *prev = head_avail_->GetPrevPtr(arena_);
110 234025724 next->link_prev = block->ConvertToLink(arena_);
111 234025724 prev->link_next = block->ConvertToLink(arena_);
112 234025724 block->link_next = head_avail_->ConvertToLink(arena_);
113 234025724 block->link_prev = prev->ConvertToLink(arena_);
114 234025724 }
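EnqueueAvailBlock() above and UnlinkAvailBlock() at the end of this file are the two standard splice operations on a circular doubly linked list with a sentinel. A pointer-based sketch of both (the real code goes through ConvertToLink()/GetNextPtr() because links are stored as arena offsets):

    // Circular doubly linked list with a sentinel; no NULL checks needed
    // because next and prev always point at a valid node.
    struct Node {
      Node *next;
      Node *prev;
    };

    // Insert block at the tail, i.e. immediately before the sentinel.
    void Enqueue(Node *head, Node *block) {
      Node *prev = head->prev;  // current tail
      block->next = head;
      block->prev = prev;
      prev->next = block;
      head->prev = block;
    }

    // Splice block out in O(1); the block's own links are left stale.
    void Unlink(Node *block) {
      block->prev->next = block->next;
      block->next->prev = block->prev;
    }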
115
116
117 /**
118 * The ptr must point to a result of Malloc(). The size of the area is
119 * stored in the ReservedBlockCtl immediately before ptr.
120 */
121 405573651 uint32_t MallocArena::GetSize(void *ptr) const {
122
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 405573651 times.
405573651 assert(Contains(ptr));
123
124 405573651 ReservedBlockCtl *block_ctl = reinterpret_cast<ReservedBlockCtl *>(
125 reinterpret_cast<char *>(ptr) - sizeof(ReservedBlockCtl));
126 405573651 const int32_t size = block_ctl->size();
127
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 405573651 times.
405573651 assert(size > 1);
128 405573651 return size - sizeof(ReservedBlockCtl) - 1;
129 }
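The return value undoes exactly what Malloc() adds below: a ReservedBlockCtl in front, one type-tag byte at the end, and a round-up to 8 bytes. A worked example with an assumed sizeof(ReservedBlockCtl) of 8 (the real value comes from malloc_arena.h; the kMinBlockSize clamp is ignored here):

    #include <cassert>
    #include <stdint.h>

    static int32_t RoundUp8(int32_t x) { return (x + 7) & ~7; }

    int main() {
      const int32_t kCtlSize = 8;     // assumed sizeof(ReservedBlockCtl)
      const uint32_t requested = 30;

      // Malloc(): control word first, block type tag last, rounded to 8
      const int32_t stored = RoundUp8(kCtlSize + requested + 1);  // 40

      // GetSize(): strip the control word and the tag byte again
      const int32_t reported = stored - kCtlSize - 1;             // 31
      assert(reported >= static_cast<int32_t>(requested));  // may over-report
      return 0;
    }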
130
131
132 /**
133 * Walks the list of available blocks starting from rover_ and allocates the
134 * first available spot that's large enough. Puts the reserved block at the end
135 * of the available one and, if necessary, removes the available one from the
136 * list of free blocks.
137 */
138 314961577 void *MallocArena::Malloc(const uint32_t size) {
139
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 314961577 times.
314961577 assert(size > 0);
140
141 // Control word first, block type tag last
142 314961577 int32_t total_size = sizeof(ReservedBlockCtl) + size + 1;
143 314961577 total_size = RoundUp8(total_size);
144
2/2
✓ Branch 0 taken 1265827 times.
✓ Branch 1 taken 313695750 times.
314961577 if (total_size < kMinBlockSize)
145 1265827 total_size = kMinBlockSize;
146
147 314961577 AvailBlockCtl *p = FindAvailBlock(total_size);
148
2/2
✓ Branch 0 taken 78935296 times.
✓ Branch 1 taken 236026281 times.
314961577 if (p == NULL)
149 78935296 return NULL;
150
151 236026281 no_reserved_++;
152 236026281 return ReserveBlock(p, total_size);
153 }
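Malloc() deliberately returns NULL instead of growing the arena; handling exhaustion is the caller's job. A hypothetical caller pattern (the function and constant names here are illustrative, not CVMFS API):

    #include <cstddef>
    #include <stdint.h>
    #include <vector>

    #include "malloc_arena.h"

    // Try the newest arena first; on NULL, open a fresh arena and retry.
    // Assumes arenas is non-empty and size fits into an empty arena.
    void *AllocFromArenas(std::vector<MallocArena *> *arenas, uint32_t size) {
      const unsigned kArenaSize = 32 * 1024 * 1024;  // multiple of 2MB
      void *p = arenas->back()->Malloc(size);
      if (p != NULL)
        return p;
      arenas->push_back(new MallocArena(kArenaSize));
      return arenas->back()->Malloc(size);
    }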
154
155
156 /**
157 * The arena starts with a pointer to this followed by the AvailBlockCtl of
158 * head_avail_, followed by a reserved tag to prevent it from being merged,
159 * followed by a free block spanning the arena until the end tag. The end tag
160 * is a single negative int, which mimics another reserved block.
161 */
162 5208 MallocArena::MallocArena(unsigned arena_size)
163 5208 : arena_(reinterpret_cast<char *>(sxmmap_align(arena_size)))
164 5208 , head_avail_(reinterpret_cast<AvailBlockCtl *>(arena_ + sizeof(uint64_t)))
165 5208 , rover_(head_avail_)
166 5208 , no_reserved_(0)
167 5208 , arena_size_(arena_size) {
168
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5208 times.
5208 assert(arena_size_ > 0);
169
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5208 times.
5208 assert((arena_size_ % (2 * 1024 * 1024)) == 0); // Multiple of 2MB
170
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5208 times.
5208 assert(arena_size_ <= (512 * 1024 * 1024)); // <= 512MB
171
172 5208 const unsigned char padding = 7;
173 // Size of the initial free block: everything minus arena boundaries
174 5208 const int32_t usable_size = arena_size_
175 5208 - (sizeof(uint64_t) + sizeof(AvailBlockCtl)
176 + padding + 1 + sizeof(int32_t));
177
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5208 times.
5208 assert((usable_size % 8) == 0);
178
179 // First 8 bytes of arena: this pointer (occupies only 4 bytes on 32bit
180 // architectures, in which case the second 4 bytes are unused.)
181 5208 *reinterpret_cast<MallocArena **>(arena_) = this;
182
183 // The initial large free block
184 5208 AvailBlockCtl *free_block = new (arena_ + sizeof(uint64_t)
185 + sizeof(AvailBlockCtl) + padding + 1)
186 5208 AvailBlockCtl();
187 5208 free_block->size = usable_size;
188 5208 free_block->link_next = free_block->link_prev = head_avail_->ConvertToLink(
189 arena_);
190 5208 new (AvailBlockTag::GetTagLocation(free_block)) AvailBlockTag(usable_size);
191
192 5208 head_avail_->size = 0;
193 5208 head_avail_->link_next = head_avail_->link_prev = free_block->ConvertToLink(
194 arena_);
195
196 // Prevent succeeding blocks from merging
197 5208 *(reinterpret_cast<char *>(free_block) - 1) = kTagReserved;
198 // Final tag: reserved block marker
199 5208 *reinterpret_cast<int32_t *>(arena_ + arena_size_ - sizeof(int32_t)) = -1;
200 5208 }
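The resulting layout, written out as a byte map (offsets follow directly from the constructor code above):

    arena_ + 0                     MallocArena *this (8 bytes reserved even on
                                   32-bit, keeping what follows 8-aligned)
    arena_ + 8                     head_avail_ sentinel (AvailBlockCtl, size 0)
    ... + sizeof(AvailBlockCtl)    7 padding bytes, then one kTagReserved byte
                                   (so blocks at the low end cannot merge down
                                   into the arena header)
    next offset                    initial free block of usable_size bytes,
                                   including its own AvailBlockCtl header and
                                   AvailBlockTag trailer
    arena_ + arena_size_ - 4       int32_t -1 end tag, mimicking a reserved
                                   block so nothing merges past the arena end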
201
202
203 /**
204 * Initializes the arena with repeated copies of the given pattern. Used for
205 * testing.
206 */
207 21 MallocArena *MallocArena::CreateInitialized(unsigned arena_size,
208 unsigned char pattern) {
209 21 MallocArena *result = new MallocArena(arena_size);
210 // At this point, there is one big free block linked to by head_avail_
211 21 AvailBlockCtl *free_block = result->head_avail_->GetNextPtr(result->arena_);
212
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 21 times.
21 assert(free_block != result->head_avail_);
213
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 21 times.
21 assert(free_block->size > 0);
214 // Strip control information at both ends of the block
215 21 const int usable_size = free_block->size
216 21 - (sizeof(AvailBlockCtl) + sizeof(AvailBlockTag));
217
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 21 times.
21 assert(usable_size > 0);
218 21 memset(free_block + 1, pattern, usable_size);
219 21 return result;
220 }
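A hypothetical test usage: the pattern fill makes uninitialized reads easy to spot, because bytes the caller never wrote still carry the fill value:

    #include <cassert>
    #include <cstddef>

    #include "malloc_arena.h"

    int main() {
      // 2MB is the smallest size the constructor accepts (multiple of 2MB)
      MallocArena *arena = MallocArena::CreateInitialized(2 * 1024 * 1024, 0xAB);
      unsigned char *p = reinterpret_cast<unsigned char *>(arena->Malloc(64));
      assert(p != NULL);
      assert(p[0] == 0xAB);  // untouched payload byte: still the fill pattern
      delete arena;
      return 0;
    }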
221
222
223 5203 MallocArena::~MallocArena() { sxunmap(arena_, arena_size_); }
224
225
226 /**
227 * Given the free block "block", cuts out a new reserved block of size
228 * block_size at the end of the free block. Returns a pointer usable by the
229 * application.
230 */
231 236026281 void *MallocArena::ReserveBlock(AvailBlockCtl *block, int32_t block_size) {
232
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 236026281 times.
236026281 assert(block->size >= block_size);
233
234 236026281 int32_t remaining_size = block->size - block_size;
235 // Avoid creation of very small blocks
236
2/2
✓ Branch 0 taken 17776038 times.
✓ Branch 1 taken 218250243 times.
236026281 if (remaining_size < kMinBlockSize) {
237 17776038 block_size += remaining_size;
238 17776038 remaining_size = 0;
239 }
240
241 // Update the list of available blocks
242
2/2
✓ Branch 0 taken 17776038 times.
✓ Branch 1 taken 218250243 times.
236026281 if (remaining_size == 0) {
243 // Remove free block p from the list of available blocks
244 17776038 UnlinkAvailBlock(block);
245 } else {
246 218250243 block->ShrinkTo(remaining_size);
247 }
248
249 // Place the new allocation, which also sets the block type tag at the end
250 236026281 char *new_block = reinterpret_cast<char *>(block) + remaining_size;
251 236026281 new (new_block) ReservedBlockCtl(block_size);
252 236026281 return new_block + sizeof(ReservedBlockCtl);
253 }
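Carving from the high end is what keeps the free block's header, and with it the list links, in place: only the size shrinks, so no relinking is needed unless the block is fully consumed. The split arithmetic as a standalone sketch (the kMinBlockSize value is assumed for illustration; the real constant is in malloc_arena.h):

    #include <stdint.h>

    const int32_t kMinBlockSize = 24;  // assumed, see malloc_arena.h

    // Returns the offset of the reservation inside the free block and leaves
    // the surviving free size in *free_size. Slivers below kMinBlockSize are
    // folded into the reservation so they never pollute the free list.
    int32_t SplitFromTail(int32_t *free_size, int32_t block_size) {
      int32_t remaining = *free_size - block_size;
      if (remaining < kMinBlockSize) {
        block_size += remaining;  // absorb the sliver into the reservation
        remaining = 0;            // free block fully consumed
      }
      *free_size = remaining;
      return remaining;  // the reservation starts where the free part ends
    }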
254
255
256 /**
257 * Removes the given block from the doubly linked free block list. This happens
258 * when two adjacent free blocks are created in Free() and then merged. Or if
259 * a block gets fully used in Malloc().
260 */
261 233378423 void MallocArena::UnlinkAvailBlock(AvailBlockCtl *block) {
262 233378423 AvailBlockCtl *next = block->GetNextPtr(arena_);
263 233378423 AvailBlockCtl *prev = block->GetPrevPtr(arena_);
264 233378423 prev->link_next = block->link_next;
265 233378423 next->link_prev = block->link_prev;
266 233378423 }
267