GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/malloc_arena.h
Date: 2024-04-28 02:33:07
Exec Total Coverage
Lines: 25 25 100.0%
Branches: 1 2 50.0%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 */
4
5 #ifndef CVMFS_MALLOC_ARENA_H_
6 #define CVMFS_MALLOC_ARENA_H_
7
8 #include <inttypes.h>
9 #include <stdint.h>
10
11 #include <cassert>
12 #include <new>
13
14 // TODO(jblomer): the arena size could be a template parameter. In order to
15 // reduce code duplication, all functions not requiring the arena size could be
16 // moved to a base class.
17
18 /**
19 * An mmap'd block of general purpose memory for malloc/free in sqlite. Uses
20 * the "boundary-tag system" as described in TAOCP vol 1 section 2.5.
21 * Not thread-safe.
22 *
 23 * Returned pointers are 8-byte aligned. Since a reserved block has a 4-byte
 24 * prefix, all reserved blocks must start at addresses that end with binary 100.
25 * We round up all allocation requests to a multiple of 8. Reserved blocks
26 * are always placed at the upper bound of a free block. The first block of
27 * the arena is appropriately padded and because the upper bound of the
28 * initial large free block is at an address that ends on binary 100, all
29 * reserved blocks will be aligned correctly.
30 *
31 * The tag layouts are as follows. Size is a signed 4 byte integer, previous
32 * and next are 4 byte offsets into the arena, linking to the previous and
33 * next free blocks in the list of free blocks.
34 *
35 * Available (free) block: Reserved (used) block:
36 *
37 * +--------+--------+ +--------+--------+
38 * upper | size | 00| upper | | 01|
39 * +--------+--------+ +--------+--------+
40 * | ... | | ... |
41 * +-----------------+ +-----------------+
42 * |previous| | | ... |
43 * +-----------------+ +-----------------+
44 * lower | size | next | lower | -size | |
45 * +--------+--------+ +--------+--------+
46 * B0 B7 B0 B7
47 *
48 *
49 * First block of arena_: Last "block" of arena_:
50 * in the free list but blocked for merging
51 *
52 * +--------+#########
53 * upper | 01| arena #
54 * +--------+--------+
55 * |previous| |
56 * +-----------------+ multiple of 8
57 * | int(0) | next | <- head_avail_ |
58 * +-----------------+ +--------+########
59 * lower | this [+ pad@32] | lower |int(-1) |outside#
60 * +--------+--------+ +--------+########
61 * B0 B7 B0 B4
62 *
 63 * The arena size has to be a power of two (e.g. 2MB); GetMallocArena's
 64 * address masking relies on this. It also has to be a multiple of 8
 65 * for alignment reasons. It needs to be <= 512MB.
65 */
class MallocArena {
 public:
  /**
   * Avoid very small free blocks.  This must be larger than
   * sizeof(AvailBlockCtl) + sizeof(AvailBlockTag) and a multiple of 8.
   */
  static const int kMinBlockSize = 24;

  /**
   * Returns the MallocArena that houses the destination of ptr.
   *
   * The arena is aligned at a multiple of arena_size (see arena_ below), so
   * masking off the low bits of ptr yields the arena base in O(1);
   * arena_size must be a power of two for this mask to be valid.  The first
   * bytes of the arena store the pointer to the owning MallocArena object,
   * which is what the final dereference reads.
   */
  static inline MallocArena *GetMallocArena(void *ptr, unsigned arena_size) {
    // NOLINTNEXTLINE(performance-no-int-to-ptr)
    void *arena = reinterpret_cast<void *>(
      uintptr_t(ptr) & ~(uintptr_t(arena_size) - uintptr_t(1)));
    return *reinterpret_cast<MallocArena **>(arena);
  }

  explicit MallocArena(unsigned arena_size);
  static MallocArena *CreateInitialized(unsigned arena_size,
                                        unsigned char pattern);
  ~MallocArena();

  void *Malloc(const uint32_t size);
  void Free(void *ptr);
  /// Returns true if ptr points into this arena's memory block.
  inline bool Contains(void *ptr) const {
    return GetMallocArena(ptr, arena_size_) == this;
  }
  uint32_t GetSize(void *ptr) const;
  /// True if no blocks are currently reserved, i.e. everything was free'd.
  bool IsEmpty() const { return no_reserved_ == 0; }

 private:
  // Values of the 1-byte tag stored in the last byte of a block's upper
  // boundary (see the boundary-tag layout in the class comment).
  static const char kTagAvail = 0;
  static const char kTagReserved = 1;

  /**
   * Lower boundary of a free block.  Note that the linking of blocks is not
   * necessarily in ascending order but random.
   */
  struct AvailBlockCtl {
    // link_next/link_prev are arena-relative offsets; both getters resolve
    // them against the arena base pointer.
    AvailBlockCtl *GetNextPtr(char *base) {
      return reinterpret_cast<AvailBlockCtl *>(base + link_next);
    }
    AvailBlockCtl *GetPrevPtr(char *base) {
      return reinterpret_cast<AvailBlockCtl *>(base + link_prev);
    }
    // Converts this block's address into an arena-relative offset suitable
    // for storing in link_next/link_prev.
    int32_t ConvertToLink(char *base) {
      return static_cast<int>(reinterpret_cast<char *>(this) - base);
    }
    // Reduces the block's size and rewrites the upper boundary tag at the new
    // end of the block (placement new: writes in place, no allocation).
    void ShrinkTo(int32_t smaller_size) {
      size = smaller_size;
      new (AvailBlockTag::GetTagLocation(this)) AvailBlockTag(smaller_size);
    }
    int32_t size;  // always positive
    int32_t link_next;  // offset in the arena; saves 4 bytes on 64bit archs
    int32_t link_prev;  // offset in the arena; saves 4 bytes on 64bit archs
  };

  /**
   * 8 bytes upper boundary of a free block.
   */
  struct AvailBlockTag {
    explicit AvailBlockTag(int32_t s) : size(s), tag(kTagAvail) { }
    // The tag occupies the last sizeof(AvailBlockTag) bytes of the block,
    // i.e. its upper boundary.
    static void *GetTagLocation(AvailBlockCtl *block) {
      return
        reinterpret_cast<char *>(block) + block->size - sizeof(AvailBlockTag);
    }
    int32_t size;
    char padding[3];  // places tag in the last byte of the 8-byte boundary
    char tag;
  };

  /**
   * Lower boundary of a reserved block: a negative size to distinguish from
   * the lower boundary of a free block.
   */
  struct ReservedBlockCtl {
   public:
    // Stores the size negated and marks the last byte of the block (its
    // upper boundary tag) as reserved.
    explicit ReservedBlockCtl(int32_t s) : size_(-s) {
      char *base = reinterpret_cast<char *>(this);
      *(base + s - 1) = kTagReserved;
    }

    // Returns the block size as a positive number.
    int32_t size() const { assert(size_ <= 0); return -size_; }

   private:
    int32_t size_;  // always negative
  };

  void UnlinkAvailBlock(AvailBlockCtl *block);
  void EnqueueAvailBlock(AvailBlockCtl *block);
  AvailBlockCtl *FindAvailBlock(const int32_t block_size);
  void *ReserveBlock(AvailBlockCtl *block, int32_t block_size);

  /**
   * Starts with the address of the MallocArena object followed by a
   * head_avail_ block and ends with a -1 guard integer to mimic a reserved
   * end block.  The arena is aligned at a multiple of arena_size.  Therefore,
   * a pointer pointing into it can get the corresponding MallocArena object
   * in O(1).
   */
  char *arena_;  ///< The memory block
  /**
   * Head of the list of available blocks.  Located at the beginning of the
   * arena_.  The only "available" block with size 0 and with the upper tag
   * field set to "reserved", so that it is not merged with another free'd
   * block.
   */
  AvailBlockCtl *head_avail_;
  AvailBlockCtl *rover_;  ///< The free block where the next search starts.
  uint32_t no_reserved_;  ///< Number of currently reserved blocks.
  unsigned arena_size_;  ///< Arena size in bytes (power of two, <= 512MB).
};  // class MallocArena
178
179 #endif // CVMFS_MALLOC_ARENA_H_
180