Directory: | cvmfs/
---|---
File: | cvmfs/kvstore.cc
Date: | 2025-08-31 02:39:21

 | Exec | Total | Coverage
---|---|---|---
Lines: | 193 | 249 | 77.5%
Branches: | 112 | 258 | 43.4%

Line | Branch | Exec | Source
---|---|---|---
1 | | | /**
2 | | | * This file is part of the CernVM File System.
3 | | | */
4 | | |
5 | | | #include "kvstore.h"
6 | | |
7 | | | #include <assert.h>
8 | | | #include <errno.h>
9 | | | #include <limits.h>
10 | | | #include <string.h>
11 | | | #include <unistd.h>
12 | | |
13 | | | #include <algorithm>
14 | | |
15 | | | #include "util/async.h"
16 | | | #include "util/concurrency.h"
17 | | | #include "util/logging.h"
18 | | |
19 | | | using namespace std; // NOLINT
20 | | |
21 | | | namespace {
22 | | |
23 | | 436530 | static inline uint32_t hasher_any(const shash::Any &key) {
24 | | | // We'll just do the same thing as hasher_md5, since every hash is at
25 | | | // least as large.
26 | | | return static_cast<uint32_t>(
27 | | 436530 | *(reinterpret_cast<const uint32_t *>(key.digest) + 1));
28 | | | }
29 | | |
30 | | | } // anonymous namespace
31 | | |
32 | | | const double MemoryKvStore::kCompactThreshold = 0.8;
33 | | |
34 | | |
35 | | 1109 | MemoryKvStore::MemoryKvStore(unsigned int cache_entries,
36 | | | MemoryAllocator alloc,
37 | | | unsigned alloc_size,
38 | | 1109 | perf::StatisticsTemplate statistics)
39 | | 1109 | : allocator_(alloc)
40 | | 1109 | , used_bytes_(0)
41 | | 1109 | , entry_count_(0)
42 | | 1109 | , max_entries_(cache_entries)
43 | 2/4: ✓ Branch 1 taken 1109 times. ✗ Branch 2 not taken. ✓ Branch 4 taken 1109 times. ✗ Branch 5 not taken. | 1109 | , entries_(cache_entries, shash::Any(), hasher_any,
44 | 2/4: ✓ Branch 2 taken 1109 times. ✗ Branch 3 not taken. ✓ Branch 5 taken 1109 times. ✗ Branch 6 not taken. | 2218 | perf::StatisticsTemplate("lru", statistics))
45 | | 1109 | , heap_(NULL)
46 | 2/4: ✓ Branch 2 taken 1109 times. ✗ Branch 3 not taken. ✓ Branch 5 taken 1109 times. ✗ Branch 6 not taken. | 2218 | , counters_(statistics) {
47 | | 1109 | const int retval = pthread_rwlock_init(&rwlock_, NULL);
48 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 1109 times. | 1109 | assert(retval == 0);
49 | 2/2: ✓ Branch 0 taken 48 times. ✓ Branch 1 taken 1061 times. | 1109 | switch (alloc) {
50 | | 48 | case kMallocHeap:
51 | | 48 | heap_ = new MallocHeap(
52 | 3/6: ✓ Branch 1 taken 48 times. ✗ Branch 2 not taken. ✓ Branch 4 taken 48 times. ✗ Branch 5 not taken. ✓ Branch 7 taken 48 times. ✗ Branch 8 not taken. | 48 | alloc_size, this->MakeCallback(&MemoryKvStore::OnBlockMove, this));
53 | | 48 | break;
54 | | 1061 | default:
55 | | 1061 | break;
56 | | | }
57 | | 1109 | }
58 | | |
59 | | |
60 | | 1109 | MemoryKvStore::~MemoryKvStore() {
61 | 2/2: ✓ Branch 0 taken 48 times. ✓ Branch 1 taken 1061 times. | 1109 | delete heap_;
62 | | 1109 | pthread_rwlock_destroy(&rwlock_);
63 | | 1109 | }
64 | | |
65 | | |
66 | | ✗ | void MemoryKvStore::OnBlockMove(const MallocHeap::BlockPtr &ptr) {
67 | | | bool ok;
68 | | ✗ | struct AllocHeader a;
69 | | ✗ | MemoryBuffer buf;
70 | | |
71 | | | // must be locked by caller
72 | | ✗ | assert(ptr.pointer);
73 | | ✗ | memcpy(&a, ptr.pointer, sizeof(a));
74 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "compaction moved %s to %p",
75 | | ✗ | a.id.ToString().c_str(), ptr.pointer);
76 | | ✗ | assert(a.version == 0);
77 | | ✗ | const bool update_lru = false;
78 | | ✗ | ok = entries_.Lookup(a.id, &buf, update_lru);
79 | | ✗ | assert(ok);
80 | | ✗ | buf.address = static_cast<char *>(ptr.pointer) + sizeof(a);
81 | | ✗ | ok = entries_.UpdateValue(buf.id, buf);
82 | | ✗ | assert(ok);
83 | | | }
84 | | |
85 | | |
86 | | 1449 | bool MemoryKvStore::Contains(const shash::Any &id) {
87 | 1/2: ✓ Branch 1 taken 1449 times. ✗ Branch 2 not taken. | 1449 | MemoryBuffer buf;
88 | | | // LogCvmfs(kLogKvStore, kLogDebug, "check buffer %s", id.ToString().c_str());
89 | | 1449 | const bool update_lru = false;
90 | 1/2: ✓ Branch 1 taken 1449 times. ✗ Branch 2 not taken. | 2898 | return entries_.Lookup(id, &buf, update_lru);
91 | | | }
92 | | |
93 | | |
94 | | 4984 | int MemoryKvStore::DoMalloc(MemoryBuffer *buf) {
95 | 1/2: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. | 4984 | MemoryBuffer tmp;
96 | 1/2: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. | 4984 | AllocHeader a;
97 | | |
98 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 4984 times. | 4984 | assert(buf);
99 | | 4984 | memcpy(&tmp, buf, sizeof(tmp));
100 | | |
101 | | 4984 | tmp.address = NULL;
102 | 1/2: ✓ Branch 0 taken 4984 times. ✗ Branch 1 not taken. | 4984 | if (tmp.size > 0) {
103 | 1/3: ✓ Branch 0 taken 4984 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. | 4984 | switch (allocator_) {
104 | | 4984 | case kMallocLibc:
105 | | 4984 | tmp.address = malloc(tmp.size);
106 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 4984 times. | 4984 | if (!tmp.address)
107 | | ✗ | return -errno;
108 | | 4984 | break;
109 | | ✗ | case kMallocHeap:
110 | | ✗ | assert(heap_);
111 | | ✗ | a.id = tmp.id;
112 | | ✗ | tmp.address = heap_->Allocate(tmp.size + sizeof(a), &a, sizeof(a));
113 | | ✗ | if (!tmp.address)
114 | | ✗ | return -ENOMEM;
115 | | ✗ | tmp.address = static_cast<char *>(tmp.address) + sizeof(a);
116 | | ✗ | break;
117 | | ✗ | default:
118 | | ✗ | abort();
119 | | | }
120 | | | }
121 | | |
122 | | 4984 | memcpy(buf, &tmp, sizeof(*buf));
123 | | 4984 | return 0;
124 | | | }
125 | | |
126 | | |
127 | | 4538 | void MemoryKvStore::DoFree(MemoryBuffer *buf) {
128 | 1/2: ✓ Branch 1 taken 4538 times. ✗ Branch 2 not taken. | 4538 | const AllocHeader a;
129 | | |
130 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 4538 times. | 4538 | assert(buf);
131 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 4538 times. | 4538 | if (!buf->address)
132 | | ✗ | return;
133 | 1/3: ✓ Branch 0 taken 4538 times. ✗ Branch 1 not taken. ✗ Branch 2 not taken. | 4538 | switch (allocator_) {
134 | | 4538 | case kMallocLibc:
135 | | 4538 | free(buf->address);
136 | | 4538 | return;
137 | | ✗ | case kMallocHeap:
138 | | ✗ | heap_->MarkFree(static_cast<char *>(buf->address) - sizeof(a));
139 | | ✗ | return;
140 | | ✗ | default:
141 | | ✗ | abort();
142 | | | }
143 | | | }
144 | | |
145 | | |
146 | | 4984 | bool MemoryKvStore::CompactMemory() {
147 | | | double utilization;
148 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 4984 times. | 4984 | switch (allocator_) {
149 | | ✗ | case kMallocHeap:
150 | | ✗ | utilization = heap_->utilization();
151 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "compact requested (%f)", utilization);
152 | | ✗ | if (utilization < kCompactThreshold) {
153 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "compacting heap");
154 | | ✗ | heap_->Compact();
155 | | ✗ | if (heap_->utilization() > utilization)
156 | | ✗ | return true;
157 | | | }
158 | | ✗ | return false;
159 | | 4984 | default:
160 | | | // the others can't do any compact, so just ignore
161 | | 4984 | LogCvmfs(kLogKvStore, kLogDebug, "compact requested");
162 | | 4984 | return false;
163 | | | }
164 | | | }
165 | | |
166 | | |
167 | | 239 | int64_t MemoryKvStore::GetSize(const shash::Any &id) {
168 | 1/2: ✓ Branch 1 taken 239 times. ✗ Branch 2 not taken. | 239 | MemoryBuffer mem;
169 | | 239 | perf::Inc(counters_.n_getsize);
170 | | 239 | const bool update_lru = false;
171 | 3/4: ✓ Branch 1 taken 239 times. ✗ Branch 2 not taken. ✓ Branch 3 taken 196 times. ✓ Branch 4 taken 43 times. | 239 | if (entries_.Lookup(id, &mem, update_lru)) {
172 | | | // LogCvmfs(kLogKvStore, kLogDebug, "%s is %u B", id.ToString().c_str(),
173 | | | // mem.size);
174 | | 196 | return mem.size;
175 | | | } else {
176 | 1/2: ✓ Branch 2 taken 43 times. ✗ Branch 3 not taken. | 43 | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on GetSize",
177 | 1/2: ✓ Branch 1 taken 43 times. ✗ Branch 2 not taken. | 86 | id.ToString().c_str());
178 | | 43 | return -ENOENT;
179 | | | }
180 | | | }
181 | | |
182 | | |
183 | | 215 | int64_t MemoryKvStore::GetRefcount(const shash::Any &id) {
184 | 1/2: ✓ Branch 1 taken 215 times. ✗ Branch 2 not taken. | 215 | MemoryBuffer mem;
185 | | 215 | perf::Inc(counters_.n_getrefcount);
186 | | 215 | const bool update_lru = false;
187 | 2/4: ✓ Branch 1 taken 215 times. ✗ Branch 2 not taken. ✓ Branch 3 taken 215 times. ✗ Branch 4 not taken. | 215 | if (entries_.Lookup(id, &mem, update_lru)) {
188 | | | // LogCvmfs(kLogKvStore, kLogDebug, "%s has refcount %u",
189 | | | // id.ToString().c_str(), mem.refcount);
190 | | 215 | return mem.refcount;
191 | | | } else {
192 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on GetRefcount",
193 | | ✗ | id.ToString().c_str());
194 | | ✗ | return -ENOENT;
195 | | | }
196 | | | }
197 | | |
198 | | |
199 | | 33613 | bool MemoryKvStore::IncRef(const shash::Any &id) {
200 | | 33613 | perf::Inc(counters_.n_incref);
201 | | 33613 | const WriteLockGuard guard(rwlock_);
202 | 1/2: ✓ Branch 1 taken 33613 times. ✗ Branch 2 not taken. | 33613 | MemoryBuffer mem;
203 | 3/4: ✓ Branch 1 taken 33613 times. ✗ Branch 2 not taken. ✓ Branch 3 taken 33570 times. ✓ Branch 4 taken 43 times. | 33613 | if (entries_.Lookup(id, &mem)) {
204 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 33570 times. | 33570 | assert(mem.refcount < UINT_MAX);
205 | | 33570 | ++mem.refcount;
206 | 1/2: ✓ Branch 1 taken 33570 times. ✗ Branch 2 not taken. | 33570 | entries_.Insert(id, mem);
207 | 1/2: ✓ Branch 2 taken 33570 times. ✗ Branch 3 not taken. | 33570 | LogCvmfs(kLogKvStore, kLogDebug, "increased refcount of %s to %u",
208 | 1/2: ✓ Branch 1 taken 33570 times. ✗ Branch 2 not taken. | 67140 | id.ToString().c_str(), mem.refcount);
209 | | 33570 | return true;
210 | | | } else {
211 | 1/2: ✓ Branch 2 taken 43 times. ✗ Branch 3 not taken. | 43 | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on IncRef",
212 | 1/2: ✓ Branch 1 taken 43 times. ✗ Branch 2 not taken. | 86 | id.ToString().c_str());
213 | | 43 | return false;
214 | | | }
215 | | 33613 | }
216 | | |
217 | | |
218 | | 33595 | bool MemoryKvStore::Unref(const shash::Any &id) {
219 | | 33595 | perf::Inc(counters_.n_unref);
220 | | 33595 | const WriteLockGuard guard(rwlock_);
221 | 1/2: ✓ Branch 1 taken 33595 times. ✗ Branch 2 not taken. | 33595 | MemoryBuffer mem;
222 | 3/4: ✓ Branch 1 taken 33595 times. ✗ Branch 2 not taken. ✓ Branch 3 taken 33552 times. ✓ Branch 4 taken 43 times. | 33595 | if (entries_.Lookup(id, &mem)) {
223 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 33552 times. | 33552 | assert(mem.refcount > 0);
224 | | 33552 | --mem.refcount;
225 | 1/2: ✓ Branch 1 taken 33552 times. ✗ Branch 2 not taken. | 33552 | entries_.Insert(id, mem);
226 | 1/2: ✓ Branch 2 taken 33552 times. ✗ Branch 3 not taken. | 33552 | LogCvmfs(kLogKvStore, kLogDebug, "decreased refcount of %s to %u",
227 | 1/2: ✓ Branch 1 taken 33552 times. ✗ Branch 2 not taken. | 67104 | id.ToString().c_str(), mem.refcount);
228 | | 33552 | return true;
229 | | | } else {
230 | 2/4: ✓ Branch 1 taken 43 times. ✗ Branch 2 not taken. ✓ Branch 5 taken 43 times. ✗ Branch 6 not taken. | 43 | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Unref", id.ToString().c_str());
231 | | 43 | return false;
232 | | | }
233 | | 33595 | }
234 | | |
235 | | |
236 | | 276 | int64_t MemoryKvStore::Read(const shash::Any &id,
237 | | | void *buf,
238 | | | size_t size,
239 | | | size_t offset) {
240 | 1/2: ✓ Branch 1 taken 276 times. ✗ Branch 2 not taken. | 276 | MemoryBuffer mem;
241 | | 276 | perf::Inc(counters_.n_read);
242 | | 276 | const ReadLockGuard guard(rwlock_);
243 | 2/4: ✓ Branch 1 taken 276 times. ✗ Branch 2 not taken. ✗ Branch 3 not taken. ✓ Branch 4 taken 276 times. | 276 | if (!entries_.Lookup(id, &mem)) {
244 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Read", id.ToString().c_str());
245 | | ✗ | return -ENOENT;
246 | | | }
247 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 276 times. | 276 | if (offset > mem.size) {
248 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "out of bounds read (%zu>%zu) on %s",
249 | | ✗ | offset, mem.size, id.ToString().c_str());
250 | | ✗ | return 0;
251 | | | }
252 | | 276 | const uint64_t copy_size = std::min(mem.size - offset, size);
253 | | | // LogCvmfs(kLogKvStore, kLogDebug, "copy %u B from offset %u of %s",
254 | | | // copy_size, offset, id.ToString().c_str());
255 | | 276 | memcpy(buf, static_cast<char *>(mem.address) + offset, copy_size);
256 | | 276 | perf::Xadd(counters_.sz_read, copy_size);
257 | | 276 | return copy_size;
258 | | 276 | }
259 | | |
260 | | |
261 | | 4984 | int MemoryKvStore::Commit(const MemoryBuffer &buf) {
262 | | 4984 | const WriteLockGuard guard(rwlock_);
263 | 1/2: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. | 9968 | return DoCommit(buf);
264 | | 4984 | }
265 | | |
266 | | |
267 | | 4984 | int MemoryKvStore::DoCommit(const MemoryBuffer &buf) {
268 | | | // we need to be careful about refcounts. If another thread wants to read
269 | | | // a cache entry while it's being written (OpenFromTxn put partial data in
270 | | | // the kvstore, will be committed again later) the refcount in the kvstore
271 | | | // will differ from the refcount in the cache transaction. To avoid leaks,
272 | | | // either the caller needs to fetch the cache entry before every write to
273 | | | // find the current refcount, or the kvstore can ignore the passed-in
274 | | | // refcount if the entry already exists. This implementation does the latter,
275 | | | // and as a result it's not possible to directly modify the refcount
276 | | | // without a race condition. This is a hint that callers should use the
277 | | | // refcount like a lock and not directly modify the numeric value.
278 | | |
279 | 1/2: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. | 4984 | CompactMemory();
280 | | |
281 | 1/2: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. | 4984 | MemoryBuffer mem;
282 | | 4984 | perf::Inc(counters_.n_commit);
283 | 2/4: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. ✓ Branch 5 taken 4984 times. ✗ Branch 6 not taken. | 4984 | LogCvmfs(kLogKvStore, kLogDebug, "commit %s", buf.id.ToString().c_str());
284 | 3/4: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. ✓ Branch 3 taken 94 times. ✓ Branch 4 taken 4890 times. | 4984 | if (entries_.Lookup(buf.id, &mem)) {
285 | 1/2: ✓ Branch 1 taken 94 times. ✗ Branch 2 not taken. | 94 | LogCvmfs(kLogKvStore, kLogDebug, "commit overwrites existing entry");
286 | | 94 | const size_t old_size = mem.size;
287 | 1/2: ✓ Branch 1 taken 94 times. ✗ Branch 2 not taken. | 94 | DoFree(&mem);
288 | | 94 | used_bytes_ -= old_size;
289 | | 94 | counters_.sz_size->Set(used_bytes_);
290 | | 94 | --entry_count_;
291 | | | } else {
292 | | | // since this is a new entry, the caller can choose the starting
293 | | | // refcount (starting at 1 for pinning, for example)
294 | | 4890 | mem.refcount = buf.refcount;
295 | | | }
296 | | 4984 | mem.object_flags = buf.object_flags;
297 | | 4984 | mem.id = buf.id;
298 | | 4984 | mem.size = buf.size;
299 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 4984 times. | 4984 | if (entry_count_ == max_entries_) {
300 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "too many entries in kvstore");
301 | | ✗ | return -ENFILE;
302 | | | }
303 | 2/4: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. ✗ Branch 3 not taken. ✓ Branch 4 taken 4984 times. | 4984 | if (DoMalloc(&mem) < 0) {
304 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "failed to allocate %s",
305 | | ✗ | buf.id.ToString().c_str());
306 | | ✗ | return -EIO;
307 | | | }
308 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 4984 times. | 4984 | assert(SSIZE_MAX - mem.size > used_bytes_);
309 | | 4984 | memcpy(mem.address, buf.address, mem.size);
310 | 1/2: ✓ Branch 1 taken 4984 times. ✗ Branch 2 not taken. | 4984 | entries_.Insert(buf.id, mem);
311 | | 4984 | ++entry_count_;
312 | | 4984 | used_bytes_ += mem.size;
313 | | 4984 | counters_.sz_size->Set(used_bytes_);
314 | | 4984 | perf::Xadd(counters_.sz_committed, mem.size);
315 | | 4984 | return 0;
316 | | | }
317 | | |
318 | | |
319 | | 258 | bool MemoryKvStore::Delete(const shash::Any &id) {
320 | | 258 | perf::Inc(counters_.n_delete);
321 | | 258 | const WriteLockGuard guard(rwlock_);
322 | 1/2: ✓ Branch 1 taken 258 times. ✗ Branch 2 not taken. | 516 | return DoDelete(id);
323 | | 258 | }
324 | | |
325 | | |
326 | | 258 | bool MemoryKvStore::DoDelete(const shash::Any &id) {
327 | 1/2: ✓ Branch 1 taken 258 times. ✗ Branch 2 not taken. | 258 | MemoryBuffer buf;
328 | 3/4: ✓ Branch 1 taken 258 times. ✗ Branch 2 not taken. ✓ Branch 3 taken 86 times. ✓ Branch 4 taken 172 times. | 258 | if (!entries_.Lookup(id, &buf)) {
329 | 1/2: ✓ Branch 2 taken 86 times. ✗ Branch 3 not taken. | 86 | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Delete",
330 | 1/2: ✓ Branch 1 taken 86 times. ✗ Branch 2 not taken. | 172 | id.ToString().c_str());
331 | | 86 | return false;
332 | | | }
333 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 172 times. | 172 | if (buf.refcount > 0) {
334 | | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "can't delete %s, nonzero refcount",
335 | | ✗ | id.ToString().c_str());
336 | | ✗ | return false;
337 | | | }
338 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 172 times. | 172 | assert(entry_count_ > 0);
339 | | 172 | --entry_count_;
340 | | 172 | used_bytes_ -= buf.size;
341 | | 172 | counters_.sz_size->Set(used_bytes_);
342 | | 172 | perf::Xadd(counters_.sz_deleted, buf.size);
343 | 1/2: ✓ Branch 1 taken 172 times. ✗ Branch 2 not taken. | 172 | DoFree(&buf);
344 | 1/2: ✓ Branch 1 taken 172 times. ✗ Branch 2 not taken. | 172 | entries_.Forget(id);
345 | 2/4: ✓ Branch 1 taken 172 times. ✗ Branch 2 not taken. ✓ Branch 5 taken 172 times. ✗ Branch 6 not taken. | 172 | LogCvmfs(kLogKvStore, kLogDebug, "deleted %s", id.ToString().c_str());
346 | | 172 | return true;
347 | | | }
348 | | |
349 | | |
350 | | 291 | bool MemoryKvStore::ShrinkTo(size_t size) {
351 | | 291 | perf::Inc(counters_.n_shrinkto);
352 | | 291 | const WriteLockGuard guard(rwlock_);
353 | 1/2: ✓ Branch 1 taken 291 times. ✗ Branch 2 not taken. | 291 | shash::Any key;
354 | 1/2: ✓ Branch 1 taken 291 times. ✗ Branch 2 not taken. | 291 | MemoryBuffer buf;
355 | | |
356 | 2/2: ✓ Branch 0 taken 101 times. ✓ Branch 1 taken 190 times. | 291 | if (used_bytes_ <= size) {
357 | 1/2: ✓ Branch 1 taken 101 times. ✗ Branch 2 not taken. | 101 | LogCvmfs(kLogKvStore, kLogDebug, "no need to shrink");
358 | | 101 | return true;
359 | | | }
360 | | |
361 | 1/2: ✓ Branch 1 taken 190 times. ✗ Branch 2 not taken. | 190 | LogCvmfs(kLogKvStore, kLogDebug, "shrinking to %zu B", size);
362 | 1/2: ✓ Branch 1 taken 190 times. ✗ Branch 2 not taken. | 190 | entries_.FilterBegin();
363 | 3/4: ✓ Branch 1 taken 4652 times. ✗ Branch 2 not taken. ✓ Branch 3 taken 4597 times. ✓ Branch 4 taken 55 times. | 4652 | while (entries_.FilterNext()) {
364 | 2/2: ✓ Branch 0 taken 135 times. ✓ Branch 1 taken 4462 times. | 4597 | if (used_bytes_ <= size)
365 | | 135 | break;
366 | 1/2: ✓ Branch 1 taken 4462 times. ✗ Branch 2 not taken. | 4462 | entries_.FilterGet(&key, &buf);
367 | 2/2: ✓ Branch 0 taken 190 times. ✓ Branch 1 taken 4272 times. | 4462 | if (buf.refcount > 0) {
368 | 1/2: ✓ Branch 2 taken 190 times. ✗ Branch 3 not taken. | 190 | LogCvmfs(kLogKvStore, kLogDebug, "skip %s, nonzero refcount",
369 | 1/2: ✓ Branch 1 taken 190 times. ✗ Branch 2 not taken. | 380 | key.ToString().c_str());
370 | | 190 | continue;
371 | | | }
372 | 1/2: ✗ Branch 0 not taken. ✓ Branch 1 taken 4272 times. | 4272 | assert(entry_count_ > 0);
373 | | 4272 | --entry_count_;
374 | 1/2: ✓ Branch 1 taken 4272 times. ✗ Branch 2 not taken. | 4272 | entries_.FilterDelete();
375 | | 4272 | used_bytes_ -= buf.size;
376 | | 4272 | perf::Xadd(counters_.sz_shrunk, buf.size);
377 | | 4272 | counters_.sz_size->Set(used_bytes_);
378 | 1/2: ✓ Branch 1 taken 4272 times. ✗ Branch 2 not taken. | 4272 | DoFree(&buf);
379 | 2/4: ✓ Branch 1 taken 4272 times. ✗ Branch 2 not taken. ✓ Branch 5 taken 4272 times. ✗ Branch 6 not taken. | 4272 | LogCvmfs(kLogKvStore, kLogDebug, "delete %s", key.ToString().c_str());
380 | | | }
381 | 1/2: ✓ Branch 1 taken 190 times. ✗ Branch 2 not taken. | 190 | entries_.FilterEnd();
382 | 1/2: ✓ Branch 1 taken 190 times. ✗ Branch 2 not taken. | 190 | LogCvmfs(kLogKvStore, kLogDebug, "shrunk to %zu B", used_bytes_);
383 | | 190 | return used_bytes_ <= size;
384 | | 291 | }
385 | | |
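
The comment above DoCommit (source lines 268-277) hints at the intended calling pattern: Commit() honours the passed-in refcount only for new entries, callers should treat the refcount like a lock via IncRef()/Unref() rather than writing numeric values, and DoDelete() refuses entries whose refcount is still nonzero. Below is a minimal usage sketch of that pattern, assuming the declarations from kvstore.h in a cvmfs build; it uses only calls that appear in the listing above, and the helper name and zeroed object_flags are illustrative assumptions, not code from the repository.

```cpp
// Hypothetical usage sketch for MemoryKvStore, based only on the interface
// visible in the coverage listing above (not code from the cvmfs repository).
#include <stdint.h>

#include <cstring>
#include <string>
#include <vector>

#include "kvstore.h"  // MemoryKvStore, MemoryBuffer (cvmfs source tree)

// Store one object, pin it while reading it back, then delete it.
bool PutReadDelete(MemoryKvStore *store, const shash::Any &id,
                   const std::string &data) {
  MemoryBuffer buf;
  buf.id = id;
  buf.address = const_cast<char *>(data.data());
  buf.size = data.size();
  buf.refcount = 0;      // start unpinned; 1 would pin the new entry
  buf.object_flags = 0;  // assumption: no special flags needed for this sketch

  if (store->Commit(buf) != 0)  // DoCommit copies the payload into the store
    return false;

  // Treat the refcount like a lock: pin before reading, unpin afterwards.
  if (!store->IncRef(id))
    return false;
  std::vector<char> out(data.size());
  const int64_t nbytes = store->Read(id, out.data(), out.size(), 0);
  store->Unref(id);

  // Delete only succeeds once the refcount has dropped back to zero.
  return (nbytes == static_cast<int64_t>(data.size())) && store->Delete(id);
}
```

Committing with refcount 0 leaves the entry unpinned, so after the Unref() the count is back at zero and the final Delete() can succeed; committing with refcount 1 instead would keep the entry pinned until a matching Unref().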