Directory: | cvmfs/ |
---|---|
File: | cvmfs/kvstore.cc |
Date: | 2025-07-13 02:35:07 |
Exec | Total | Coverage | |
---|---|---|---|
Lines: | 193 | 249 | 77.5% |
Branches: | 112 | 258 | 43.4% |
Line | Branch | Exec | Source |
---|---|---|---|
1 | /** | ||
2 | * This file is part of the CernVM File System. | ||
3 | */ | ||
4 | |||
5 | #include "kvstore.h" | ||
6 | |||
7 | #include <assert.h> | ||
8 | #include <errno.h> | ||
9 | #include <limits.h> | ||
10 | #include <string.h> | ||
11 | #include <unistd.h> | ||
12 | |||
13 | #include <algorithm> | ||
14 | |||
15 | #include "util/async.h" | ||
16 | #include "util/concurrency.h" | ||
17 | #include "util/logging.h" | ||
18 | |||
19 | using namespace std; // NOLINT | ||
20 | |||
21 | namespace { | ||
22 | |||
23 | 359788 | static inline uint32_t hasher_any(const shash::Any &key) { | |
24 | // We'll just do the same thing as hasher_md5, since every hash is at | ||
25 | // least as large. | ||
26 | return static_cast<uint32_t>( | ||
27 | 359788 | *(reinterpret_cast<const uint32_t *>(key.digest) + 1)); | |
28 | } | ||
29 | |||
30 | } // anonymous namespace | ||
31 | |||
// Heap utilization below which CompactMemory() triggers a compaction pass
const double MemoryKvStore::kCompactThreshold = 0.8;
33 | |||
34 | |||
/**
 * Creates an in-memory key/value store for cache entries.
 *
 * @param cache_entries maximum number of entries held in the LRU table
 * @param alloc         allocation strategy (libc malloc or compacting heap)
 * @param alloc_size    size of the managed heap, used only for kMallocHeap
 * @param statistics    template for the performance counters
 */
MemoryKvStore::MemoryKvStore(unsigned int cache_entries,
                             MemoryAllocator alloc,
                             unsigned alloc_size,
                             perf::StatisticsTemplate statistics)
    : allocator_(alloc)
    , used_bytes_(0)
    , entry_count_(0)
    , max_entries_(cache_entries)
    , entries_(cache_entries, shash::Any(), hasher_any,
               perf::StatisticsTemplate("lru", statistics))
    , heap_(NULL)
    , counters_(statistics) {
  const int retval = pthread_rwlock_init(&rwlock_, NULL);
  assert(retval == 0);
  switch (alloc) {
    case kMallocHeap:
      // the heap reports relocated blocks through OnBlockMove so stored
      // buffer addresses can be patched after compaction
      heap_ = new MallocHeap(
          alloc_size, this->MakeCallback(&MemoryKvStore::OnBlockMove, this));
      break;
    default:
      // other allocators need no extra setup
      break;
  }
}
58 | |||
59 | |||
MemoryKvStore::~MemoryKvStore() {
  // heap_ is NULL unless kMallocHeap was selected; delete on NULL is a no-op
  delete heap_;
  pthread_rwlock_destroy(&rwlock_);
}
64 | |||
65 | |||
/**
 * Callback invoked by MallocHeap compaction after a block has been moved.
 * Reads the AllocHeader stored in front of the payload to recover the
 * entry id, then updates the stored buffer address to the new location.
 * The caller must already hold the write lock.
 */
void MemoryKvStore::OnBlockMove(const MallocHeap::BlockPtr &ptr) {
  bool ok;
  struct AllocHeader a;
  MemoryBuffer buf;

  // must be locked by caller
  assert(ptr.pointer);
  memcpy(&a, ptr.pointer, sizeof(a));
  LogCvmfs(kLogKvStore, kLogDebug, "compaction moved %s to %p",
           a.id.ToString().c_str(), ptr.pointer);
  assert(a.version == 0);
  // peek only; a move must not disturb the LRU ordering
  const bool update_lru = false;
  ok = entries_.Lookup(a.id, &buf, update_lru);
  assert(ok);
  // payload starts right behind the header inside the moved block
  buf.address = static_cast<char *>(ptr.pointer) + sizeof(a);
  ok = entries_.UpdateValue(buf.id, buf);
  assert(ok);
}
84 | |||
85 | |||
86 | 414 | bool MemoryKvStore::Contains(const shash::Any &id) { | |
87 |
1/2✓ Branch 1 taken 414 times.
✗ Branch 2 not taken.
|
414 | MemoryBuffer buf; |
88 | // LogCvmfs(kLogKvStore, kLogDebug, "check buffer %s", id.ToString().c_str()); | ||
89 | 414 | const bool update_lru = false; | |
90 |
1/2✓ Branch 1 taken 414 times.
✗ Branch 2 not taken.
|
828 | return entries_.Lookup(id, &buf, update_lru); |
91 | } | ||
92 | |||
93 | |||
94 | 757 | int MemoryKvStore::DoMalloc(MemoryBuffer *buf) { | |
95 |
1/2✓ Branch 1 taken 757 times.
✗ Branch 2 not taken.
|
757 | MemoryBuffer tmp; |
96 |
1/2✓ Branch 1 taken 757 times.
✗ Branch 2 not taken.
|
757 | AllocHeader a; |
97 | |||
98 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 757 times.
|
757 | assert(buf); |
99 | 757 | memcpy(&tmp, buf, sizeof(tmp)); | |
100 | |||
101 | 757 | tmp.address = NULL; | |
102 |
1/2✓ Branch 0 taken 757 times.
✗ Branch 1 not taken.
|
757 | if (tmp.size > 0) { |
103 |
1/3✓ Branch 0 taken 757 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
|
757 | switch (allocator_) { |
104 | 757 | case kMallocLibc: | |
105 | 757 | tmp.address = malloc(tmp.size); | |
106 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 757 times.
|
757 | if (!tmp.address) |
107 | ✗ | return -errno; | |
108 | 757 | break; | |
109 | ✗ | case kMallocHeap: | |
110 | ✗ | assert(heap_); | |
111 | ✗ | a.id = tmp.id; | |
112 | ✗ | tmp.address = heap_->Allocate(tmp.size + sizeof(a), &a, sizeof(a)); | |
113 | ✗ | if (!tmp.address) | |
114 | ✗ | return -ENOMEM; | |
115 | ✗ | tmp.address = static_cast<char *>(tmp.address) + sizeof(a); | |
116 | ✗ | break; | |
117 | ✗ | default: | |
118 | ✗ | abort(); | |
119 | } | ||
120 | } | ||
121 | |||
122 | 757 | memcpy(buf, &tmp, sizeof(*buf)); | |
123 | 757 | return 0; | |
124 | } | ||
125 | |||
126 | |||
127 | 561 | void MemoryKvStore::DoFree(MemoryBuffer *buf) { | |
128 |
1/2✓ Branch 1 taken 561 times.
✗ Branch 2 not taken.
|
561 | const AllocHeader a; |
129 | |||
130 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 561 times.
|
561 | assert(buf); |
131 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 561 times.
|
561 | if (!buf->address) |
132 | ✗ | return; | |
133 |
1/3✓ Branch 0 taken 561 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
|
561 | switch (allocator_) { |
134 | 561 | case kMallocLibc: | |
135 | 561 | free(buf->address); | |
136 | 561 | return; | |
137 | ✗ | case kMallocHeap: | |
138 | ✗ | heap_->MarkFree(static_cast<char *>(buf->address) - sizeof(a)); | |
139 | ✗ | return; | |
140 | ✗ | default: | |
141 | ✗ | abort(); | |
142 | } | ||
143 | } | ||
144 | |||
145 | |||
146 | 757 | bool MemoryKvStore::CompactMemory() { | |
147 | double utilization; | ||
148 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 757 times.
|
757 | switch (allocator_) { |
149 | ✗ | case kMallocHeap: | |
150 | ✗ | utilization = heap_->utilization(); | |
151 | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "compact requested (%f)", utilization); | |
152 | ✗ | if (utilization < kCompactThreshold) { | |
153 | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "compacting heap"); | |
154 | ✗ | heap_->Compact(); | |
155 | ✗ | if (heap_->utilization() > utilization) | |
156 | ✗ | return true; | |
157 | } | ||
158 | ✗ | return false; | |
159 | 757 | default: | |
160 | // the others can't do any compact, so just ignore | ||
161 | 757 | LogCvmfs(kLogKvStore, kLogDebug, "compact requested"); | |
162 | 757 | return false; | |
163 | } | ||
164 | } | ||
165 | |||
166 | |||
167 | 58 | int64_t MemoryKvStore::GetSize(const shash::Any &id) { | |
168 |
1/2✓ Branch 1 taken 58 times.
✗ Branch 2 not taken.
|
58 | MemoryBuffer mem; |
169 | 58 | perf::Inc(counters_.n_getsize); | |
170 | 58 | const bool update_lru = false; | |
171 |
3/4✓ Branch 1 taken 58 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 53 times.
✓ Branch 4 taken 5 times.
|
58 | if (entries_.Lookup(id, &mem, update_lru)) { |
172 | // LogCvmfs(kLogKvStore, kLogDebug, "%s is %u B", id.ToString().c_str(), | ||
173 | // mem.size); | ||
174 | 53 | return mem.size; | |
175 | } else { | ||
176 |
1/2✓ Branch 2 taken 5 times.
✗ Branch 3 not taken.
|
5 | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on GetSize", |
177 |
1/2✓ Branch 1 taken 5 times.
✗ Branch 2 not taken.
|
10 | id.ToString().c_str()); |
178 | 5 | return -ENOENT; | |
179 | } | ||
180 | } | ||
181 | |||
182 | |||
183 | 25 | int64_t MemoryKvStore::GetRefcount(const shash::Any &id) { | |
184 |
1/2✓ Branch 1 taken 25 times.
✗ Branch 2 not taken.
|
25 | MemoryBuffer mem; |
185 | 25 | perf::Inc(counters_.n_getrefcount); | |
186 | 25 | const bool update_lru = false; | |
187 |
2/4✓ Branch 1 taken 25 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 25 times.
✗ Branch 4 not taken.
|
25 | if (entries_.Lookup(id, &mem, update_lru)) { |
188 | // LogCvmfs(kLogKvStore, kLogDebug, "%s has refcount %u", | ||
189 | // id.ToString().c_str(), mem.refcount); | ||
190 | 25 | return mem.refcount; | |
191 | } else { | ||
192 | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on GetRefcount", | |
193 | ✗ | id.ToString().c_str()); | |
194 | ✗ | return -ENOENT; | |
195 | } | ||
196 | } | ||
197 | |||
198 | |||
199 | 55272 | bool MemoryKvStore::IncRef(const shash::Any &id) { | |
200 | 55272 | perf::Inc(counters_.n_incref); | |
201 | 55272 | const WriteLockGuard guard(rwlock_); | |
202 |
1/2✓ Branch 1 taken 55272 times.
✗ Branch 2 not taken.
|
55272 | MemoryBuffer mem; |
203 |
3/4✓ Branch 1 taken 55272 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 55267 times.
✓ Branch 4 taken 5 times.
|
55272 | if (entries_.Lookup(id, &mem)) { |
204 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 55267 times.
|
55267 | assert(mem.refcount < UINT_MAX); |
205 | 55267 | ++mem.refcount; | |
206 |
1/2✓ Branch 1 taken 55267 times.
✗ Branch 2 not taken.
|
55267 | entries_.Insert(id, mem); |
207 |
1/2✓ Branch 2 taken 55267 times.
✗ Branch 3 not taken.
|
55267 | LogCvmfs(kLogKvStore, kLogDebug, "increased refcount of %s to %u", |
208 |
1/2✓ Branch 1 taken 55267 times.
✗ Branch 2 not taken.
|
110534 | id.ToString().c_str(), mem.refcount); |
209 | 55267 | return true; | |
210 | } else { | ||
211 |
1/2✓ Branch 2 taken 5 times.
✗ Branch 3 not taken.
|
5 | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on IncRef", |
212 |
1/2✓ Branch 1 taken 5 times.
✗ Branch 2 not taken.
|
10 | id.ToString().c_str()); |
213 | 5 | return false; | |
214 | } | ||
215 | 55272 | } | |
216 | |||
217 | |||
218 | 55242 | bool MemoryKvStore::Unref(const shash::Any &id) { | |
219 | 55242 | perf::Inc(counters_.n_unref); | |
220 | 55242 | const WriteLockGuard guard(rwlock_); | |
221 |
1/2✓ Branch 1 taken 55242 times.
✗ Branch 2 not taken.
|
55242 | MemoryBuffer mem; |
222 |
3/4✓ Branch 1 taken 55242 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 55237 times.
✓ Branch 4 taken 5 times.
|
55242 | if (entries_.Lookup(id, &mem)) { |
223 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 55237 times.
|
55237 | assert(mem.refcount > 0); |
224 | 55237 | --mem.refcount; | |
225 |
1/2✓ Branch 1 taken 55237 times.
✗ Branch 2 not taken.
|
55237 | entries_.Insert(id, mem); |
226 |
1/2✓ Branch 2 taken 55237 times.
✗ Branch 3 not taken.
|
55237 | LogCvmfs(kLogKvStore, kLogDebug, "decreased refcount of %s to %u", |
227 |
1/2✓ Branch 1 taken 55237 times.
✗ Branch 2 not taken.
|
110474 | id.ToString().c_str(), mem.refcount); |
228 | 55237 | return true; | |
229 | } else { | ||
230 |
2/4✓ Branch 1 taken 5 times.
✗ Branch 2 not taken.
✓ Branch 5 taken 5 times.
✗ Branch 6 not taken.
|
5 | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Unref", id.ToString().c_str()); |
231 | 5 | return false; | |
232 | } | ||
233 | 55242 | } | |
234 | |||
235 | |||
/**
 * Copies up to @p size bytes starting at @p offset of entry @p id into
 * @p buf. Returns the number of bytes copied (possibly 0), 0 for an
 * out-of-bounds offset, or -ENOENT if the entry does not exist.
 */
int64_t MemoryKvStore::Read(const shash::Any &id,
                            void *buf,
                            size_t size,
                            size_t offset) {
  MemoryBuffer mem;
  perf::Inc(counters_.n_read);
  const ReadLockGuard guard(rwlock_);
  if (!entries_.Lookup(id, &mem)) {
    LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Read", id.ToString().c_str());
    return -ENOENT;
  }
  if (offset > mem.size) {
    LogCvmfs(kLogKvStore, kLogDebug, "out of bounds read (%zu>%zu) on %s",
             offset, mem.size, id.ToString().c_str());
    return 0;
  }
  // clamp the copy to the bytes actually available past offset
  const uint64_t copy_size = std::min(mem.size - offset, size);
  // LogCvmfs(kLogKvStore, kLogDebug, "copy %u B from offset %u of %s",
  //          copy_size, offset, id.ToString().c_str());
  memcpy(buf, static_cast<char *>(mem.address) + offset, copy_size);
  perf::Xadd(counters_.sz_read, copy_size);
  return copy_size;
}
259 | |||
260 | |||
/**
 * Stores @p buf in the kvstore under the write lock; see DoCommit() for
 * the semantics and return values.
 */
int MemoryKvStore::Commit(const MemoryBuffer &buf) {
  const WriteLockGuard guard(rwlock_);
  return DoCommit(buf);
}
265 | |||
266 | |||
/**
 * Inserts or overwrites the entry described by @p buf, copying its payload
 * into freshly allocated storage. On an overwrite, the existing refcount
 * is kept and buf.refcount is ignored (see comment below). Returns 0 on
 * success, -ENFILE when the entry table is full, -EIO when allocation
 * fails.
 */
int MemoryKvStore::DoCommit(const MemoryBuffer &buf) {
  // we need to be careful about refcounts. If another thread wants to read
  // a cache entry while it's being written (OpenFromTxn put partial data in
  // the kvstore, will be committed again later) the refcount in the kvstore
  // will differ from the refcount in the cache transaction. To avoid leaks,
  // either the caller needs to fetch the cache entry before every write to
  // find the current refcount, or the kvstore can ignore the passed-in
  // refcount if the entry already exists. This implementation does the latter,
  // and as a result it's not possible to directly modify the refcount
  // without a race condition. This is a hint that callers should use the
  // refcount like a lock and not directly modify the numeric value.

  CompactMemory();

  MemoryBuffer mem;
  perf::Inc(counters_.n_commit);
  LogCvmfs(kLogKvStore, kLogDebug, "commit %s", buf.id.ToString().c_str());
  if (entries_.Lookup(buf.id, &mem)) {
    LogCvmfs(kLogKvStore, kLogDebug, "commit overwrites existing entry");
    const size_t old_size = mem.size;
    DoFree(&mem);
    used_bytes_ -= old_size;
    counters_.sz_size->Set(used_bytes_);
    --entry_count_;
    // NOTE(review): if DoMalloc() below fails, the old entry stays in
    // entries_ with a freed address — confirm callers treat -EIO as fatal
  } else {
    // since this is a new entry, the caller can choose the starting
    // refcount (starting at 1 for pinning, for example)
    mem.refcount = buf.refcount;
  }
  mem.object_flags = buf.object_flags;
  mem.id = buf.id;
  mem.size = buf.size;
  // only reachable for brand-new entries: the overwrite path above already
  // decremented entry_count_ below max_entries_
  if (entry_count_ == max_entries_) {
    LogCvmfs(kLogKvStore, kLogDebug, "too many entries in kvstore");
    return -ENFILE;
  }
  if (DoMalloc(&mem) < 0) {
    LogCvmfs(kLogKvStore, kLogDebug, "failed to allocate %s",
             buf.id.ToString().c_str());
    return -EIO;
  }
  assert(SSIZE_MAX - mem.size > used_bytes_);
  memcpy(mem.address, buf.address, mem.size);
  entries_.Insert(buf.id, mem);
  ++entry_count_;
  used_bytes_ += mem.size;
  counters_.sz_size->Set(used_bytes_);
  perf::Xadd(counters_.sz_committed, mem.size);
  return 0;
}
317 | |||
318 | |||
/**
 * Removes entry @p id under the write lock; see DoDelete() for the
 * semantics and return value.
 */
bool MemoryKvStore::Delete(const shash::Any &id) {
  perf::Inc(counters_.n_delete);
  const WriteLockGuard guard(rwlock_);
  return DoDelete(id);
}
324 | |||
325 | |||
326 | 30 | bool MemoryKvStore::DoDelete(const shash::Any &id) { | |
327 |
1/2✓ Branch 1 taken 30 times.
✗ Branch 2 not taken.
|
30 | MemoryBuffer buf; |
328 |
3/4✓ Branch 1 taken 30 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 10 times.
✓ Branch 4 taken 20 times.
|
30 | if (!entries_.Lookup(id, &buf)) { |
329 |
1/2✓ Branch 2 taken 10 times.
✗ Branch 3 not taken.
|
10 | LogCvmfs(kLogKvStore, kLogDebug, "miss %s on Delete", |
330 |
1/2✓ Branch 1 taken 10 times.
✗ Branch 2 not taken.
|
20 | id.ToString().c_str()); |
331 | 10 | return false; | |
332 | } | ||
333 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 20 times.
|
20 | if (buf.refcount > 0) { |
334 | ✗ | LogCvmfs(kLogKvStore, kLogDebug, "can't delete %s, nonzero refcount", | |
335 | ✗ | id.ToString().c_str()); | |
336 | ✗ | return false; | |
337 | } | ||
338 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 20 times.
|
20 | assert(entry_count_ > 0); |
339 | 20 | --entry_count_; | |
340 | 20 | used_bytes_ -= buf.size; | |
341 | 20 | counters_.sz_size->Set(used_bytes_); | |
342 | 20 | perf::Xadd(counters_.sz_deleted, buf.size); | |
343 |
1/2✓ Branch 1 taken 20 times.
✗ Branch 2 not taken.
|
20 | DoFree(&buf); |
344 |
1/2✓ Branch 1 taken 20 times.
✗ Branch 2 not taken.
|
20 | entries_.Forget(id); |
345 |
2/4✓ Branch 1 taken 20 times.
✗ Branch 2 not taken.
✓ Branch 5 taken 20 times.
✗ Branch 6 not taken.
|
20 | LogCvmfs(kLogKvStore, kLogDebug, "deleted %s", id.ToString().c_str()); |
346 | 20 | return true; | |
347 | } | ||
348 | |||
349 | |||
/**
 * Evicts unpinned entries in filter (LRU) order until at most @p size
 * bytes are in use. Entries with a nonzero refcount are skipped. Returns
 * true iff the target size was reached.
 */
bool MemoryKvStore::ShrinkTo(size_t size) {
  perf::Inc(counters_.n_shrinkto);
  const WriteLockGuard guard(rwlock_);
  shash::Any key;
  MemoryBuffer buf;

  if (used_bytes_ <= size) {
    LogCvmfs(kLogKvStore, kLogDebug, "no need to shrink");
    return true;
  }

  LogCvmfs(kLogKvStore, kLogDebug, "shrinking to %zu B", size);
  entries_.FilterBegin();
  while (entries_.FilterNext()) {
    if (used_bytes_ <= size)
      break;
    entries_.FilterGet(&key, &buf);
    if (buf.refcount > 0) {
      // pinned entries must survive the shrink
      LogCvmfs(kLogKvStore, kLogDebug, "skip %s, nonzero refcount",
               key.ToString().c_str());
      continue;
    }
    assert(entry_count_ > 0);
    --entry_count_;
    // FilterDelete removes the current filter element from the table
    entries_.FilterDelete();
    used_bytes_ -= buf.size;
    perf::Xadd(counters_.sz_shrunk, buf.size);
    counters_.sz_size->Set(used_bytes_);
    DoFree(&buf);
    LogCvmfs(kLogKvStore, kLogDebug, "delete %s", key.ToString().c_str());
  }
  entries_.FilterEnd();
  LogCvmfs(kLogKvStore, kLogDebug, "shrunk to %zu B", used_bytes_);
  return used_bytes_ <= size;
}
385 |