GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/kvstore.cc
Date: 2026-04-26 02:35:59
Exec Total Coverage
Lines: 193 249 77.5%
Branches: 112 258 43.4%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 */
4
5 #include "kvstore.h"
6
7 #include <assert.h>
8 #include <errno.h>
9 #include <limits.h>
10 #include <string.h>
11 #include <unistd.h>
12
13 #include <algorithm>
14
15 #include "util/logging.h"
16
17 using namespace std; // NOLINT
18
19 namespace {
20
21 3399952 static inline uint32_t hasher_any(const shash::Any &key) {
22 // We'll just do the same thing as hasher_md5, since every hash is at
23 // least as large.
24 return static_cast<uint32_t>(
25 3399952 *(reinterpret_cast<const uint32_t *>(key.digest) + 1));
26 }
27
28 } // anonymous namespace
29
// Compact the managed heap once its utilization drops below 80%
// (checked in CompactMemory()).
const double MemoryKvStore::kCompactThreshold = 0.8;
31
32
33 2405 MemoryKvStore::MemoryKvStore(unsigned int cache_entries,
34 MemoryAllocator alloc,
35 unsigned alloc_size,
36 2405 perf::StatisticsTemplate statistics)
37 2405 : allocator_(alloc)
38 2405 , used_bytes_(0)
39 2405 , entry_count_(0)
40 2405 , max_entries_(cache_entries)
41
2/4
✓ Branch 1 taken 2405 times.
✗ Branch 2 not taken.
✓ Branch 4 taken 2405 times.
✗ Branch 5 not taken.
2405 , entries_(cache_entries, shash::Any(), hasher_any,
42
2/4
✓ Branch 2 taken 2405 times.
✗ Branch 3 not taken.
✓ Branch 5 taken 2405 times.
✗ Branch 6 not taken.
4810 perf::StatisticsTemplate("lru", statistics))
43 2405 , heap_(NULL)
44
2/4
✓ Branch 2 taken 2405 times.
✗ Branch 3 not taken.
✓ Branch 5 taken 2405 times.
✗ Branch 6 not taken.
4810 , counters_(statistics) {
45 2405 const int retval = pthread_rwlock_init(&rwlock_, NULL);
46
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 2405 times.
2405 assert(retval == 0);
47
2/2
✓ Branch 0 taken 432 times.
✓ Branch 1 taken 1973 times.
2405 switch (alloc) {
48 432 case kMallocHeap:
49 432 heap_ = new MallocHeap(
50
3/6
✓ Branch 1 taken 432 times.
✗ Branch 2 not taken.
✓ Branch 4 taken 432 times.
✗ Branch 5 not taken.
✓ Branch 7 taken 432 times.
✗ Branch 8 not taken.
432 alloc_size, this->MakeCallback(&MemoryKvStore::OnBlockMove, this));
51 432 break;
52 1973 default:
53 1973 break;
54 }
55 2405 }
56
57
MemoryKvStore::~MemoryKvStore() {
  // heap_ is NULL unless kMallocHeap was selected; deleting NULL is a no-op.
  delete heap_;
  pthread_rwlock_destroy(&rwlock_);
}
62
63
/**
 * Callback invoked by MallocHeap during compaction whenever an allocated
 * block is relocated; re-points the matching cache entry at the block's
 * new address.
 *
 * @param ptr  descriptor of the moved block (new location)
 */
void MemoryKvStore::OnBlockMove(const MallocHeap::BlockPtr &ptr) {
  bool ok;
  struct AllocHeader a;
  MemoryBuffer buf;

  // must be locked by caller
  assert(ptr.pointer);
  // DoMalloc stores an AllocHeader at the front of each heap block; it
  // carries the object id we need to locate the entry in the table.
  memcpy(&a, ptr.pointer, sizeof(a));
  LogCvmfs(kLogKvStore, kLogDebug, "compaction moved %s to %p",
           a.id.ToString().c_str(), ptr.pointer);
  assert(a.version == 0);
  const bool update_lru = false;
  ok = entries_.Lookup(a.id, &buf, update_lru);
  assert(ok);
  // The payload starts right after the header, mirroring DoMalloc's layout.
  buf.address = static_cast<char *>(ptr.pointer) + sizeof(a);
  ok = entries_.UpdateValue(buf.id, buf);
  assert(ok);
}
82
83
84 2033 bool MemoryKvStore::Contains(const shash::Any &id) {
85
1/2
✓ Branch 1 taken 2033 times.
✗ Branch 2 not taken.
2033 MemoryBuffer buf;
86 // LogCvmfs(kLogKvStore, kLogDebug, "check buffer %s", id.ToString().c_str());
87 2033 const bool update_lru = false;
88
1/2
✓ Branch 1 taken 2033 times.
✗ Branch 2 not taken.
4066 return entries_.Lookup(id, &buf, update_lru);
89 }
90
91
92 4598 int MemoryKvStore::DoMalloc(MemoryBuffer *buf) {
93
1/2
✓ Branch 1 taken 4598 times.
✗ Branch 2 not taken.
4598 MemoryBuffer tmp;
94
1/2
✓ Branch 1 taken 4598 times.
✗ Branch 2 not taken.
4598 AllocHeader a;
95
96
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4598 times.
4598 assert(buf);
97 4598 memcpy(&tmp, buf, sizeof(tmp));
98
99 4598 tmp.address = NULL;
100
1/2
✓ Branch 0 taken 4598 times.
✗ Branch 1 not taken.
4598 if (tmp.size > 0) {
101
1/3
✓ Branch 0 taken 4598 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
4598 switch (allocator_) {
102 4598 case kMallocLibc:
103 4598 tmp.address = malloc(tmp.size);
104
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4598 times.
4598 if (!tmp.address)
105 return -errno;
106 4598 break;
107 case kMallocHeap:
108 assert(heap_);
109 a.id = tmp.id;
110 tmp.address = heap_->Allocate(tmp.size + sizeof(a), &a, sizeof(a));
111 if (!tmp.address)
112 return -ENOMEM;
113 tmp.address = static_cast<char *>(tmp.address) + sizeof(a);
114 break;
115 default:
116 abort();
117 }
118 }
119
120 4598 memcpy(buf, &tmp, sizeof(*buf));
121 4598 return 0;
122 }
123
124
125 3140 void MemoryKvStore::DoFree(MemoryBuffer *buf) {
126
1/2
✓ Branch 1 taken 3140 times.
✗ Branch 2 not taken.
3140 const AllocHeader a;
127
128
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 3140 times.
3140 assert(buf);
129
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 3140 times.
3140 if (!buf->address)
130 return;
131
1/3
✓ Branch 0 taken 3140 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
3140 switch (allocator_) {
132 3140 case kMallocLibc:
133 3140 free(buf->address);
134 3140 return;
135 case kMallocHeap:
136 heap_->MarkFree(static_cast<char *>(buf->address) - sizeof(a));
137 return;
138 default:
139 abort();
140 }
141 }
142
143
144 4598 bool MemoryKvStore::CompactMemory() {
145 double utilization;
146
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4598 times.
4598 switch (allocator_) {
147 case kMallocHeap:
148 utilization = heap_->utilization();
149 LogCvmfs(kLogKvStore, kLogDebug, "compact requested (%f)", utilization);
150 if (utilization < kCompactThreshold) {
151 LogCvmfs(kLogKvStore, kLogDebug, "compacting heap");
152 heap_->Compact();
153 if (heap_->utilization() > utilization)
154 return true;
155 }
156 return false;
157 4598 default:
158 // the others can't do any compact, so just ignore
159 4598 LogCvmfs(kLogKvStore, kLogDebug, "compact requested");
160 4598 return false;
161 }
162 }
163
164
165 315 int64_t MemoryKvStore::GetSize(const shash::Any &id) {
166
1/2
✓ Branch 1 taken 315 times.
✗ Branch 2 not taken.
315 MemoryBuffer mem;
167 315 perf::Inc(counters_.n_getsize);
168 315 const bool update_lru = false;
169
3/4
✓ Branch 1 taken 315 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 288 times.
✓ Branch 4 taken 27 times.
315 if (entries_.Lookup(id, &mem, update_lru)) {
170 // LogCvmfs(kLogKvStore, kLogDebug, "%s is %u B", id.ToString().c_str(),
171 // mem.size);
172 288 return mem.size;
173 } else {
174
1/2
✓ Branch 2 taken 27 times.
✗ Branch 3 not taken.
27 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on GetSize",
175
1/2
✓ Branch 1 taken 27 times.
✗ Branch 2 not taken.
54 id.ToString().c_str());
176 27 return -ENOENT;
177 }
178 }
179
180
181 135 int64_t MemoryKvStore::GetRefcount(const shash::Any &id) {
182
1/2
✓ Branch 1 taken 135 times.
✗ Branch 2 not taken.
135 MemoryBuffer mem;
183 135 perf::Inc(counters_.n_getrefcount);
184 135 const bool update_lru = false;
185
2/4
✓ Branch 1 taken 135 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 135 times.
✗ Branch 4 not taken.
135 if (entries_.Lookup(id, &mem, update_lru)) {
186 // LogCvmfs(kLogKvStore, kLogDebug, "%s has refcount %u",
187 // id.ToString().c_str(), mem.refcount);
188 135 return mem.refcount;
189 } else {
190 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on GetRefcount",
191 id.ToString().c_str());
192 return -ENOENT;
193 }
194 }
195
196
197 541089 bool MemoryKvStore::IncRef(const shash::Any &id) {
198 541089 perf::Inc(counters_.n_incref);
199 541089 const WriteLockGuard guard(rwlock_);
200
1/2
✓ Branch 1 taken 541089 times.
✗ Branch 2 not taken.
541089 MemoryBuffer mem;
201
3/4
✓ Branch 1 taken 541089 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 541062 times.
✓ Branch 4 taken 27 times.
541089 if (entries_.Lookup(id, &mem)) {
202
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 541062 times.
541062 assert(mem.refcount < UINT_MAX);
203 541062 ++mem.refcount;
204
1/2
✓ Branch 1 taken 541062 times.
✗ Branch 2 not taken.
541062 entries_.Insert(id, mem);
205
1/2
✓ Branch 2 taken 541062 times.
✗ Branch 3 not taken.
541062 LogCvmfs(kLogKvStore, kLogDebug, "increased refcount of %s to %u",
206
1/2
✓ Branch 1 taken 541062 times.
✗ Branch 2 not taken.
1082124 id.ToString().c_str(), mem.refcount);
207 541062 return true;
208 } else {
209
1/2
✓ Branch 2 taken 27 times.
✗ Branch 3 not taken.
27 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on IncRef",
210
1/2
✓ Branch 1 taken 27 times.
✗ Branch 2 not taken.
54 id.ToString().c_str());
211 27 return false;
212 }
213 541089 }
214
215
216 540795 bool MemoryKvStore::Unref(const shash::Any &id) {
217 540795 perf::Inc(counters_.n_unref);
218 540795 const WriteLockGuard guard(rwlock_);
219
1/2
✓ Branch 1 taken 540795 times.
✗ Branch 2 not taken.
540795 MemoryBuffer mem;
220
3/4
✓ Branch 1 taken 540795 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 540768 times.
✓ Branch 4 taken 27 times.
540795 if (entries_.Lookup(id, &mem)) {
221
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 540768 times.
540768 assert(mem.refcount > 0);
222 540768 --mem.refcount;
223
1/2
✓ Branch 1 taken 540768 times.
✗ Branch 2 not taken.
540768 entries_.Insert(id, mem);
224
1/2
✓ Branch 2 taken 540768 times.
✗ Branch 3 not taken.
540768 LogCvmfs(kLogKvStore, kLogDebug, "decreased refcount of %s to %u",
225
1/2
✓ Branch 1 taken 540768 times.
✗ Branch 2 not taken.
1081536 id.ToString().c_str(), mem.refcount);
226 540768 return true;
227 } else {
228
2/4
✓ Branch 1 taken 27 times.
✗ Branch 2 not taken.
✓ Branch 5 taken 27 times.
✗ Branch 6 not taken.
27 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Unref", id.ToString().c_str());
229 27 return false;
230 }
231 540795 }
232
233
234 244 int64_t MemoryKvStore::Read(const shash::Any &id,
235 void *buf,
236 size_t size,
237 size_t offset) {
238
1/2
✓ Branch 1 taken 244 times.
✗ Branch 2 not taken.
244 MemoryBuffer mem;
239 244 perf::Inc(counters_.n_read);
240 244 const ReadLockGuard guard(rwlock_);
241
2/4
✓ Branch 1 taken 244 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✓ Branch 4 taken 244 times.
244 if (!entries_.Lookup(id, &mem)) {
242 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Read", id.ToString().c_str());
243 return -ENOENT;
244 }
245
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 244 times.
244 if (offset > mem.size) {
246 LogCvmfs(kLogKvStore, kLogDebug, "out of bounds read (%zu>%zu) on %s",
247 offset, mem.size, id.ToString().c_str());
248 return 0;
249 }
250 244 const uint64_t copy_size = std::min(mem.size - offset, size);
251 // LogCvmfs(kLogKvStore, kLogDebug, "copy %u B from offset %u of %s",
252 // copy_size, offset, id.ToString().c_str());
253 244 memcpy(buf, static_cast<char *>(mem.address) + offset, copy_size);
254 244 perf::Xadd(counters_.sz_read, copy_size);
255 244 return copy_size;
256 244 }
257
258
/**
 * Store a copy of the buffer into the cache under buf.id, taking the
 * write lock.  See DoCommit() for the refcount semantics and error codes.
 */
int MemoryKvStore::Commit(const MemoryBuffer &buf) {
  const WriteLockGuard guard(rwlock_);
  return DoCommit(buf);
}
263
264
/**
 * Insert (or overwrite) the entry for buf.id with a freshly allocated copy
 * of buf's payload.  Must be called with the write lock held.
 *
 * @return 0 on success, -ENFILE if the entry table is full, -EIO if
 *         allocation failed
 */
int MemoryKvStore::DoCommit(const MemoryBuffer &buf) {
  // we need to be careful about refcounts. If another thread wants to read
  // a cache entry while it's being written (OpenFromTxn put partial data in
  // the kvstore, will be committed again later) the refcount in the kvstore
  // will differ from the refcount in the cache transaction. To avoid leaks,
  // either the caller needs to fetch the cache entry before every write to
  // find the current refcount, or the kvstore can ignore the passed-in
  // refcount if the entry already exists. This implementation does the latter,
  // and as a result it's not possible to directly modify the refcount
  // without a race condition. This is a hint that callers should use the
  // refcount like a lock and not directly modify the numeric value.

  // Opportunistically defragment the heap before allocating.
  CompactMemory();

  MemoryBuffer mem;
  perf::Inc(counters_.n_commit);
  LogCvmfs(kLogKvStore, kLogDebug, "commit %s", buf.id.ToString().c_str());
  if (entries_.Lookup(buf.id, &mem)) {
    // Existing entry: release the old payload but keep its refcount
    // (mem.refcount survives from the lookup; buf.refcount is ignored).
    LogCvmfs(kLogKvStore, kLogDebug, "commit overwrites existing entry");
    const size_t old_size = mem.size;
    DoFree(&mem);
    used_bytes_ -= old_size;
    counters_.sz_size->Set(used_bytes_);
    --entry_count_;
    // NOTE(review): at this point the table still holds the entry with its
    // now-freed address; if the capacity check or DoMalloc below fails, that
    // stale entry remains in entries_ -- confirm callers treat these error
    // returns as fatal for the store.
  } else {
    // since this is a new entry, the caller can choose the starting
    // refcount (starting at 1 for pinning, for example)
    mem.refcount = buf.refcount;
  }
  mem.object_flags = buf.object_flags;
  mem.id = buf.id;
  mem.size = buf.size;
  if (entry_count_ == max_entries_) {
    LogCvmfs(kLogKvStore, kLogDebug, "too many entries in kvstore");
    return -ENFILE;
  }
  if (DoMalloc(&mem) < 0) {
    LogCvmfs(kLogKvStore, kLogDebug, "failed to allocate %s",
             buf.id.ToString().c_str());
    return -EIO;
  }
  // Guard against used_bytes_ overflowing when the new size is added.
  assert(SSIZE_MAX - mem.size > used_bytes_);
  memcpy(mem.address, buf.address, mem.size);
  entries_.Insert(buf.id, mem);
  ++entry_count_;
  used_bytes_ += mem.size;
  counters_.sz_size->Set(used_bytes_);
  perf::Xadd(counters_.sz_committed, mem.size);
  return 0;
}
315
316
/**
 * Remove an unreferenced object from the store, taking the write lock.
 *
 * @return true if the entry existed, was unpinned, and was removed
 */
bool MemoryKvStore::Delete(const shash::Any &id) {
  perf::Inc(counters_.n_delete);
  const WriteLockGuard guard(rwlock_);
  return DoDelete(id);
}
322
323
324 162 bool MemoryKvStore::DoDelete(const shash::Any &id) {
325
1/2
✓ Branch 1 taken 162 times.
✗ Branch 2 not taken.
162 MemoryBuffer buf;
326
3/4
✓ Branch 1 taken 162 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 54 times.
✓ Branch 4 taken 108 times.
162 if (!entries_.Lookup(id, &buf)) {
327
1/2
✓ Branch 2 taken 54 times.
✗ Branch 3 not taken.
54 LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Delete",
328
1/2
✓ Branch 1 taken 54 times.
✗ Branch 2 not taken.
108 id.ToString().c_str());
329 54 return false;
330 }
331
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 108 times.
108 if (buf.refcount > 0) {
332 LogCvmfs(kLogKvStore, kLogDebug, "can't delete %s, nonzero refcount",
333 id.ToString().c_str());
334 return false;
335 }
336
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 108 times.
108 assert(entry_count_ > 0);
337 108 --entry_count_;
338 108 used_bytes_ -= buf.size;
339 108 counters_.sz_size->Set(used_bytes_);
340 108 perf::Xadd(counters_.sz_deleted, buf.size);
341
1/2
✓ Branch 1 taken 108 times.
✗ Branch 2 not taken.
108 DoFree(&buf);
342
1/2
✓ Branch 1 taken 108 times.
✗ Branch 2 not taken.
108 entries_.Forget(id);
343
2/4
✓ Branch 1 taken 108 times.
✗ Branch 2 not taken.
✓ Branch 5 taken 108 times.
✗ Branch 6 not taken.
108 LogCvmfs(kLogKvStore, kLogDebug, "deleted %s", id.ToString().c_str());
344 108 return true;
345 }
346
347
/**
 * Evict unpinned entries, in the LRU filter's order, until at most size
 * bytes are in use.  Entries with a nonzero refcount are skipped.
 *
 * @return true if usage is now at or below size
 */
bool MemoryKvStore::ShrinkTo(size_t size) {
  perf::Inc(counters_.n_shrinkto);
  const WriteLockGuard guard(rwlock_);
  shash::Any key;
  MemoryBuffer buf;

  if (used_bytes_ <= size) {
    LogCvmfs(kLogKvStore, kLogDebug, "no need to shrink");
    return true;
  }

  LogCvmfs(kLogKvStore, kLogDebug, "shrinking to %zu B", size);
  // Iterate the entry table through its filter protocol:
  // FilterBegin/FilterNext/FilterGet/FilterDelete/FilterEnd.
  entries_.FilterBegin();
  while (entries_.FilterNext()) {
    // Stop as soon as the target is reached.
    if (used_bytes_ <= size)
      break;
    entries_.FilterGet(&key, &buf);
    if (buf.refcount > 0) {
      // Pinned entries cannot be evicted; leave them in place.
      LogCvmfs(kLogKvStore, kLogDebug, "skip %s, nonzero refcount",
               key.ToString().c_str());
      continue;
    }
    assert(entry_count_ > 0);
    --entry_count_;
    // FilterDelete removes the current entry without breaking iteration.
    entries_.FilterDelete();
    used_bytes_ -= buf.size;
    perf::Xadd(counters_.sz_shrunk, buf.size);
    counters_.sz_size->Set(used_bytes_);
    DoFree(&buf);
    LogCvmfs(kLogKvStore, kLogDebug, "delete %s", key.ToString().c_str());
  }
  entries_.FilterEnd();
  LogCvmfs(kLogKvStore, kLogDebug, "shrunk to %zu B", used_bytes_);
  return used_bytes_ <= size;
}
383