GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/kvstore.cc
Date: 2025-06-22 02:36:02
Exec Total Coverage
Lines: 193 249 77.5%
Branches: 112 258 43.4%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 */
4
5 #include "kvstore.h"
6
7 #include <assert.h>
8 #include <errno.h>
9 #include <limits.h>
10 #include <string.h>
11 #include <unistd.h>
12
13 #include <algorithm>
14
15 #include "util/async.h"
16 #include "util/concurrency.h"
17 #include "util/logging.h"
18
19 using namespace std; // NOLINT
20
21 namespace {
22
23 3410513 static inline uint32_t hasher_any(const shash::Any &key) {
24 // We'll just do the same thing as hasher_md5, since every hash is at
25 // least as large.
26 3410513 return (uint32_t) * (reinterpret_cast<const uint32_t *>(key.digest) + 1);
27 }
28
29 } // anonymous namespace
30
// Heap utilization (fraction of live bytes) below which CompactMemory()
// asks the managed MallocHeap to compact itself.
31 const double MemoryKvStore::kCompactThreshold = 0.8;
32
33
34 3023 MemoryKvStore::MemoryKvStore(unsigned int cache_entries,
35 MemoryAllocator alloc,
36 unsigned alloc_size,
37 3023 perf::StatisticsTemplate statistics)
38 3023 : allocator_(alloc)
39 3023 , used_bytes_(0)
40 3023 , entry_count_(0)
41 3023 , max_entries_(cache_entries)
42
2/4
✓ Branch 1 taken 3023 times.
✗ Branch 2 not taken.
✓ Branch 4 taken 3023 times.
✗ Branch 5 not taken.
3023 , entries_(cache_entries, shash::Any(), hasher_any,
43
2/4
✓ Branch 2 taken 3023 times.
✗ Branch 3 not taken.
✓ Branch 5 taken 3023 times.
✗ Branch 6 not taken.
6046 perf::StatisticsTemplate("lru", statistics))
44 3023 , heap_(NULL)
45
2/4
✓ Branch 2 taken 3023 times.
✗ Branch 3 not taken.
✓ Branch 5 taken 3023 times.
✗ Branch 6 not taken.
6046 , counters_(statistics) {
46 3023 const int retval = pthread_rwlock_init(&rwlock_, NULL);
47
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 3023 times.
3023 assert(retval == 0);
48
2/2
✓ Branch 0 taken 784 times.
✓ Branch 1 taken 2239 times.
3023 switch (alloc) {
49 784 case kMallocHeap:
50 784 heap_ = new MallocHeap(
51
3/6
✓ Branch 1 taken 784 times.
✗ Branch 2 not taken.
✓ Branch 4 taken 784 times.
✗ Branch 5 not taken.
✓ Branch 7 taken 784 times.
✗ Branch 8 not taken.
784 alloc_size, this->MakeCallback(&MemoryKvStore::OnBlockMove, this));
52 784 break;
53 2239 default:
54 2239 break;
55 }
56 3023 }
57
58
59 3023 MemoryKvStore::~MemoryKvStore() {
60
2/2
✓ Branch 0 taken 784 times.
✓ Branch 1 taken 2239 times.
3023 delete heap_;
61 3023 pthread_rwlock_destroy(&rwlock_);
62 3023 }
63
64
65 void MemoryKvStore::OnBlockMove(const MallocHeap::BlockPtr &ptr) {
66 bool ok;
67 struct AllocHeader a;
68 MemoryBuffer buf;
69
70 // must be locked by caller
71 assert(ptr.pointer);
72 memcpy(&a, ptr.pointer, sizeof(a));
73 LogCvmfs(kLogKvStore, kLogDebug, "compaction moved %s to %p",
74 a.id.ToString().c_str(), ptr.pointer);
75 assert(a.version == 0);
76 const bool update_lru = false;
77 ok = entries_.Lookup(a.id, &buf, update_lru);
78 assert(ok);
79 buf.address = static_cast<char *>(ptr.pointer) + sizeof(a);
80 ok = entries_.UpdateValue(buf.id, buf);
81 assert(ok);
82 }
83
84
85 2333 bool MemoryKvStore::Contains(const shash::Any &id) {
86
1/2
✓ Branch 1 taken 2333 times.
✗ Branch 2 not taken.
2333 MemoryBuffer buf;
87 // LogCvmfs(kLogKvStore, kLogDebug, "check buffer %s", id.ToString().c_str());
88 2333 const bool update_lru = false;
89
1/2
✓ Branch 1 taken 2333 times.
✗ Branch 2 not taken.
4666 return entries_.Lookup(id, &buf, update_lru);
90 }
91
92
93 6129 int MemoryKvStore::DoMalloc(MemoryBuffer *buf) {
94
1/2
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
6129 MemoryBuffer tmp;
95
1/2
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
6129 AllocHeader a;
96
97
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6129 times.
6129 assert(buf);
98 6129 memcpy(&tmp, buf, sizeof(tmp));
99
100 6129 tmp.address = NULL;
101
1/2
✓ Branch 0 taken 6129 times.
✗ Branch 1 not taken.
6129 if (tmp.size > 0) {
102
1/3
✓ Branch 0 taken 6129 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
6129 switch (allocator_) {
103 6129 case kMallocLibc:
104 6129 tmp.address = malloc(tmp.size);
105
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6129 times.
6129 if (!tmp.address)
106 return -errno;
107 6129 break;
108 case kMallocHeap:
109 assert(heap_);
110 a.id = tmp.id;
111 tmp.address = heap_->Allocate(tmp.size + sizeof(a), &a, sizeof(a));
112 if (!tmp.address)
113 return -ENOMEM;
114 tmp.address = static_cast<char *>(tmp.address) + sizeof(a);
115 break;
116 default:
117 abort();
118 }
119 }
120
121 6129 memcpy(buf, &tmp, sizeof(*buf));
122 6129 return 0;
123 }
124
125
126 4601 void MemoryKvStore::DoFree(MemoryBuffer *buf) {
127
1/2
✓ Branch 1 taken 4601 times.
✗ Branch 2 not taken.
4601 const AllocHeader a;
128
129
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4601 times.
4601 assert(buf);
130
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4601 times.
4601 if (!buf->address)
131 return;
132
1/3
✓ Branch 0 taken 4601 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
4601 switch (allocator_) {
133 4601 case kMallocLibc:
134 4601 free(buf->address);
135 4601 return;
136 case kMallocHeap:
137 heap_->MarkFree(static_cast<char *>(buf->address) - sizeof(a));
138 return;
139 default:
140 abort();
141 }
142 }
143
144
145 6129 bool MemoryKvStore::CompactMemory() {
146 double utilization;
147
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6129 times.
6129 switch (allocator_) {
148 case kMallocHeap:
149 utilization = heap_->utilization();
150 LogCvmfs(kLogKvStore, kLogDebug, "compact requested (%f)", utilization);
151 if (utilization < kCompactThreshold) {
152 LogCvmfs(kLogKvStore, kLogDebug, "compacting heap");
153 heap_->Compact();
154 if (heap_->utilization() > utilization)
155 return true;
156 }
157 return false;
158 6129 default:
159 // the others can't do any compact, so just ignore
160 6129 LogCvmfs(kLogKvStore, kLogDebug, "compact requested");
161 6129 return false;
162 }
163 }
164
165
166 373 int64_t MemoryKvStore::GetSize(const shash::Any &id) {
167
1/2
✓ Branch 1 taken 373 times.
✗ Branch 2 not taken.
373 MemoryBuffer mem;
168 373 perf::Inc(counters_.n_getsize);
169 373 const bool update_lru = false;
170
3/4
✓ Branch 1 taken 373 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 332 times.
✓ Branch 4 taken 41 times.
373 if (entries_.Lookup(id, &mem, update_lru)) {
171 // LogCvmfs(kLogKvStore, kLogDebug, "%s is %u B", id.ToString().c_str(),
172 // mem.size);
173 332 return mem.size;
174 } else {
175
1/2
✓ Branch 2 taken 41 times.
✗ Branch 3 not taken.
41 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on GetSize",
176
1/2
✓ Branch 1 taken 41 times.
✗ Branch 2 not taken.
82 id.ToString().c_str());
177 41 return -ENOENT;
178 }
179 }
180
181
182 205 int64_t MemoryKvStore::GetRefcount(const shash::Any &id) {
183
1/2
✓ Branch 1 taken 205 times.
✗ Branch 2 not taken.
205 MemoryBuffer mem;
184 205 perf::Inc(counters_.n_getrefcount);
185 205 const bool update_lru = false;
186
2/4
✓ Branch 1 taken 205 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 205 times.
✗ Branch 4 not taken.
205 if (entries_.Lookup(id, &mem, update_lru)) {
187 // LogCvmfs(kLogKvStore, kLogDebug, "%s has refcount %u",
188 // id.ToString().c_str(), mem.refcount);
189 205 return mem.refcount;
190 } else {
191 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on GetRefcount",
192 id.ToString().c_str());
193 return -ENOENT;
194 }
195 }
196
197
198 530187 bool MemoryKvStore::IncRef(const shash::Any &id) {
199 530187 perf::Inc(counters_.n_incref);
200 530187 const WriteLockGuard guard(rwlock_);
201
1/2
✓ Branch 1 taken 530187 times.
✗ Branch 2 not taken.
530187 MemoryBuffer mem;
202
3/4
✓ Branch 1 taken 530187 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 530146 times.
✓ Branch 4 taken 41 times.
530187 if (entries_.Lookup(id, &mem)) {
203
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 530146 times.
530146 assert(mem.refcount < UINT_MAX);
204 530146 ++mem.refcount;
205
1/2
✓ Branch 1 taken 530146 times.
✗ Branch 2 not taken.
530146 entries_.Insert(id, mem);
206
1/2
✓ Branch 2 taken 530146 times.
✗ Branch 3 not taken.
530146 LogCvmfs(kLogKvStore, kLogDebug, "increased refcount of %s to %u",
207
1/2
✓ Branch 1 taken 530146 times.
✗ Branch 2 not taken.
1060292 id.ToString().c_str(), mem.refcount);
208 530146 return true;
209 } else {
210
1/2
✓ Branch 2 taken 41 times.
✗ Branch 3 not taken.
41 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on IncRef",
211
1/2
✓ Branch 1 taken 41 times.
✗ Branch 2 not taken.
82 id.ToString().c_str());
212 41 return false;
213 }
214 530187 }
215
216
217 529899 bool MemoryKvStore::Unref(const shash::Any &id) {
218 529899 perf::Inc(counters_.n_unref);
219 529899 const WriteLockGuard guard(rwlock_);
220
1/2
✓ Branch 1 taken 529899 times.
✗ Branch 2 not taken.
529899 MemoryBuffer mem;
221
3/4
✓ Branch 1 taken 529899 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 529858 times.
✓ Branch 4 taken 41 times.
529899 if (entries_.Lookup(id, &mem)) {
222
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 529858 times.
529858 assert(mem.refcount > 0);
223 529858 --mem.refcount;
224
1/2
✓ Branch 1 taken 529858 times.
✗ Branch 2 not taken.
529858 entries_.Insert(id, mem);
225
1/2
✓ Branch 2 taken 529858 times.
✗ Branch 3 not taken.
529858 LogCvmfs(kLogKvStore, kLogDebug, "decreased refcount of %s to %u",
226
1/2
✓ Branch 1 taken 529858 times.
✗ Branch 2 not taken.
1059716 id.ToString().c_str(), mem.refcount);
227 529858 return true;
228 } else {
229
2/4
✓ Branch 1 taken 41 times.
✗ Branch 2 not taken.
✓ Branch 5 taken 41 times.
✗ Branch 6 not taken.
41 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Unref", id.ToString().c_str());
230 41 return false;
231 }
232 529899 }
233
234
235 318 int64_t MemoryKvStore::Read(const shash::Any &id,
236 void *buf,
237 size_t size,
238 size_t offset) {
239
1/2
✓ Branch 1 taken 318 times.
✗ Branch 2 not taken.
318 MemoryBuffer mem;
240 318 perf::Inc(counters_.n_read);
241 318 const ReadLockGuard guard(rwlock_);
242
2/4
✓ Branch 1 taken 318 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✓ Branch 4 taken 318 times.
318 if (!entries_.Lookup(id, &mem)) {
243 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Read", id.ToString().c_str());
244 return -ENOENT;
245 }
246
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 318 times.
318 if (offset > mem.size) {
247 LogCvmfs(kLogKvStore, kLogDebug, "out of bounds read (%zu>%zu) on %s",
248 offset, mem.size, id.ToString().c_str());
249 return 0;
250 }
251 318 const uint64_t copy_size = std::min(mem.size - offset, size);
252 // LogCvmfs(kLogKvStore, kLogDebug, "copy %u B from offset %u of %s",
253 // copy_size, offset, id.ToString().c_str());
254 318 memcpy(buf, static_cast<char *>(mem.address) + offset, copy_size);
255 318 perf::Xadd(counters_.sz_read, copy_size);
256 318 return copy_size;
257 318 }
258
259
260 6129 int MemoryKvStore::Commit(const MemoryBuffer &buf) {
261 6129 const WriteLockGuard guard(rwlock_);
262
1/2
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
12258 return DoCommit(buf);
263 6129 }
264
265
266 6129 int MemoryKvStore::DoCommit(const MemoryBuffer &buf) {
267 // we need to be careful about refcounts. If another thread wants to read
268 // a cache entry while it's being written (OpenFromTxn put partial data in
269 // the kvstore, will be committed again later) the refcount in the kvstore
270 // will differ from the refcount in the cache transaction. To avoid leaks,
271 // either the caller needs to fetch the cache entry before every write to
272 // find the current refcount, or the kvstore can ignore the passed-in
273 // refcount if the entry already exists. This implementation does the latter,
274 // and as a result it's not possible to directly modify the refcount
275 // without a race condition. This is a hint that callers should use the
276 // refcount like a lock and not directly modify the numeric value.
277
278
1/2
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
6129 CompactMemory();
279
280
1/2
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
6129 MemoryBuffer mem;
281 6129 perf::Inc(counters_.n_commit);
282
2/4
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
✓ Branch 5 taken 6129 times.
✗ Branch 6 not taken.
6129 LogCvmfs(kLogKvStore, kLogDebug, "commit %s", buf.id.ToString().c_str());
283
3/4
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 138 times.
✓ Branch 4 taken 5991 times.
6129 if (entries_.Lookup(buf.id, &mem)) {
284
1/2
✓ Branch 1 taken 138 times.
✗ Branch 2 not taken.
138 LogCvmfs(kLogKvStore, kLogDebug, "commit overwrites existing entry");
285 138 const size_t old_size = mem.size;
286
1/2
✓ Branch 1 taken 138 times.
✗ Branch 2 not taken.
138 DoFree(&mem);
287 138 used_bytes_ -= old_size;
288 138 counters_.sz_size->Set(used_bytes_);
289 138 --entry_count_;
290 } else {
291 // since this is a new entry, the caller can choose the starting
292 // refcount (starting at 1 for pinning, for example)
293 5991 mem.refcount = buf.refcount;
294 }
295 6129 mem.object_flags = buf.object_flags;
296 6129 mem.id = buf.id;
297 6129 mem.size = buf.size;
298
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6129 times.
6129 if (entry_count_ == max_entries_) {
299 LogCvmfs(kLogKvStore, kLogDebug, "too many entries in kvstore");
300 return -ENFILE;
301 }
302
2/4
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✓ Branch 4 taken 6129 times.
6129 if (DoMalloc(&mem) < 0) {
303 LogCvmfs(kLogKvStore, kLogDebug, "failed to allocate %s",
304 buf.id.ToString().c_str());
305 return -EIO;
306 }
307
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6129 times.
6129 assert(SSIZE_MAX - mem.size > used_bytes_);
308 6129 memcpy(mem.address, buf.address, mem.size);
309
1/2
✓ Branch 1 taken 6129 times.
✗ Branch 2 not taken.
6129 entries_.Insert(buf.id, mem);
310 6129 ++entry_count_;
311 6129 used_bytes_ += mem.size;
312 6129 counters_.sz_size->Set(used_bytes_);
313 6129 perf::Xadd(counters_.sz_committed, mem.size);
314 6129 return 0;
315 }
316
317
318 246 bool MemoryKvStore::Delete(const shash::Any &id) {
319 246 perf::Inc(counters_.n_delete);
320 246 const WriteLockGuard guard(rwlock_);
321
1/2
✓ Branch 1 taken 246 times.
✗ Branch 2 not taken.
492 return DoDelete(id);
322 246 }
323
324
325 246 bool MemoryKvStore::DoDelete(const shash::Any &id) {
326
1/2
✓ Branch 1 taken 246 times.
✗ Branch 2 not taken.
246 MemoryBuffer buf;
327
3/4
✓ Branch 1 taken 246 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 82 times.
✓ Branch 4 taken 164 times.
246 if (!entries_.Lookup(id, &buf)) {
328
1/2
✓ Branch 2 taken 82 times.
✗ Branch 3 not taken.
82 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Delete",
329
1/2
✓ Branch 1 taken 82 times.
✗ Branch 2 not taken.
164 id.ToString().c_str());
330 82 return false;
331 }
332
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 164 times.
164 if (buf.refcount > 0) {
333 LogCvmfs(kLogKvStore, kLogDebug, "can't delete %s, nonzero refcount",
334 id.ToString().c_str());
335 return false;
336 }
337
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 164 times.
164 assert(entry_count_ > 0);
338 164 --entry_count_;
339 164 used_bytes_ -= buf.size;
340 164 counters_.sz_size->Set(used_bytes_);
341 164 perf::Xadd(counters_.sz_deleted, buf.size);
342
1/2
✓ Branch 1 taken 164 times.
✗ Branch 2 not taken.
164 DoFree(&buf);
343
1/2
✓ Branch 1 taken 164 times.
✗ Branch 2 not taken.
164 entries_.Forget(id);
344
2/4
✓ Branch 1 taken 164 times.
✗ Branch 2 not taken.
✓ Branch 5 taken 164 times.
✗ Branch 6 not taken.
164 LogCvmfs(kLogKvStore, kLogDebug, "deleted %s", id.ToString().c_str());
345 164 return true;
346 }
347
348
349 774 bool MemoryKvStore::ShrinkTo(size_t size) {
350 774 perf::Inc(counters_.n_shrinkto);
351 774 const WriteLockGuard guard(rwlock_);
352
1/2
✓ Branch 1 taken 774 times.
✗ Branch 2 not taken.
774 shash::Any key;
353
1/2
✓ Branch 1 taken 774 times.
✗ Branch 2 not taken.
774 MemoryBuffer buf;
354
355
2/2
✓ Branch 0 taken 322 times.
✓ Branch 1 taken 452 times.
774 if (used_bytes_ <= size) {
356
1/2
✓ Branch 1 taken 322 times.
✗ Branch 2 not taken.
322 LogCvmfs(kLogKvStore, kLogDebug, "no need to shrink");
357 322 return true;
358 }
359
360
1/2
✓ Branch 1 taken 452 times.
✗ Branch 2 not taken.
452 LogCvmfs(kLogKvStore, kLogDebug, "shrinking to %zu B", size);
361
1/2
✓ Branch 1 taken 452 times.
✗ Branch 2 not taken.
452 entries_.FilterBegin();
362
3/4
✓ Branch 1 taken 5203 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 4970 times.
✓ Branch 4 taken 233 times.
5203 while (entries_.FilterNext()) {
363
2/2
✓ Branch 0 taken 219 times.
✓ Branch 1 taken 4751 times.
4970 if (used_bytes_ <= size)
364 219 break;
365
1/2
✓ Branch 1 taken 4751 times.
✗ Branch 2 not taken.
4751 entries_.FilterGet(&key, &buf);
366
2/2
✓ Branch 0 taken 452 times.
✓ Branch 1 taken 4299 times.
4751 if (buf.refcount > 0) {
367
1/2
✓ Branch 2 taken 452 times.
✗ Branch 3 not taken.
452 LogCvmfs(kLogKvStore, kLogDebug, "skip %s, nonzero refcount",
368
1/2
✓ Branch 1 taken 452 times.
✗ Branch 2 not taken.
904 key.ToString().c_str());
369 452 continue;
370 }
371
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4299 times.
4299 assert(entry_count_ > 0);
372 4299 --entry_count_;
373
1/2
✓ Branch 1 taken 4299 times.
✗ Branch 2 not taken.
4299 entries_.FilterDelete();
374 4299 used_bytes_ -= buf.size;
375 4299 perf::Xadd(counters_.sz_shrunk, buf.size);
376 4299 counters_.sz_size->Set(used_bytes_);
377
1/2
✓ Branch 1 taken 4299 times.
✗ Branch 2 not taken.
4299 DoFree(&buf);
378
2/4
✓ Branch 1 taken 4299 times.
✗ Branch 2 not taken.
✓ Branch 5 taken 4299 times.
✗ Branch 6 not taken.
4299 LogCvmfs(kLogKvStore, kLogDebug, "delete %s", key.ToString().c_str());
379 }
380
1/2
✓ Branch 1 taken 452 times.
✗ Branch 2 not taken.
452 entries_.FilterEnd();
381
1/2
✓ Branch 1 taken 452 times.
✗ Branch 2 not taken.
452 LogCvmfs(kLogKvStore, kLogDebug, "shrunk to %zu B", used_bytes_);
382 452 return used_bytes_ <= size;
383 774 }
384