GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/glue_buffer.h
Date: 2026-04-26 02:35:59
Exec Total Coverage
Lines: 302 427 70.7%
Branches: 117 304 38.5%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 *
4 * This module provides the inode tracker in order to remember inodes
5 * and their parents that are in use by the kernel.
6 *
7 * These objects have to survive reloading of the library, so no virtual
8 * functions.
9 */
10
11 #include "duplex_testing.h"
12 #include <pthread.h>
13 #include <sched.h>
14 #include <stdint.h>
15
16 #include <cassert>
17 #include <cstring>
18 #include <string>
19
20 #include "bigqueue.h"
21 #include "bigvector.h"
22 #include "crypto/hash.h"
23 #include "directory_entry.h"
24 #include "shortstring.h"
25 #include "smallhash.h"
26 #include "util/atomic.h"
27 #include "util/exception.h"
28 #include "util/mutex.h"
29 #include "util/posix.h" // IWYU pragma: keep
30 #include "util/smalloc.h"
31 #include "util/string.h"
32
33 #ifndef CVMFS_GLUE_BUFFER_H_
34 #define CVMFS_GLUE_BUFFER_H_
35
36 namespace glue {
37
38 /**
39 * Inode + file type. Stores the file type in the 4 most significant bits
40 * of the 64 bit unsigned integer representing the inode. That makes the class
41 * compatible with a pure 64bit inode used in previous cvmfs versions in the
42 * inode tracker. The file type is stored using the POSIX representation in
43 * the inode's mode field.
44 * Note that InodeEx, used as a hash table key, hashes only over the inode part.
45 */
46 class InodeEx {
47 private:
48 // Extracts the file type bits from the POSIX mode field and shifts them to
49 // the right so that they align with EFileType constants.
50 176 static inline uint64_t ShiftMode(unsigned mode) { return (mode >> 12) & 017; }
51
52 public:
53 enum EFileType {
54 kUnknownType = 0,
55 kRegular = 010,
56 kSymlink = 012,
57 kDirectory = 004,
58 kFifo = 001,
59 kSocket = 014,
60 kCharDev = 002,
61 kBulkDev = 006,
62 };
63
64 322505 InodeEx() : inode_ex_(0) { }
65 75328 InodeEx(uint64_t inode, EFileType type)
66 75328 : inode_ex_(inode | (static_cast<uint64_t>(type) << 60)) { }
67 44 InodeEx(uint64_t inode, unsigned mode)
68 44 : inode_ex_(inode | (ShiftMode(mode) << 60)) { }
69
70 1663820 inline uint64_t GetInode() const { return inode_ex_ & ~(uint64_t(15) << 60); }
71 528 inline EFileType GetFileType() const {
72 528 return static_cast<EFileType>(inode_ex_ >> 60);
73 }
74
75 571674 inline bool operator==(const InodeEx &other) const {
76 571674 return GetInode() == other.GetInode();
77 }
78 148890 inline bool operator!=(const InodeEx &other) const {
79 148890 return GetInode() != other.GetInode();
80 }
81
82 132 inline bool IsCompatibleFileType(unsigned mode) const {
83 132 return (static_cast<uint64_t>(GetFileType()) == ShiftMode(mode))
84
4/4
✓ Branch 0 taken 88 times.
✓ Branch 1 taken 44 times.
✓ Branch 3 taken 44 times.
✓ Branch 4 taken 44 times.
132 || (GetFileType() == kUnknownType);
85 }
86
87 private:
88 uint64_t inode_ex_;
89 };
90
91 346454 static inline uint32_t hasher_md5(const shash::Md5 &key) {
92 // Don't start with the first bytes, because == is using them as well
93 return static_cast<uint32_t>(
94 346454 *(reinterpret_cast<const uint32_t *>(key.digest) + 1));
95 }
96
97 327400 static inline uint32_t hasher_inode(const uint64_t &inode) {
98 327400 return MurmurHash2(&inode, sizeof(inode), 0x07387a4f);
99 }
100
101 187400 static inline uint32_t hasher_inode_ex(const InodeEx &inode_ex) {
102 187400 return hasher_inode(inode_ex.GetInode());
103 }
104
105
106 //------------------------------------------------------------------------------
107
108
109 /**
110 * Pointer to a 2 byte length information followed by the characters
111 */
112 class StringRef {
113 public:
114 343287 StringRef() { length_ = NULL; }
115
116 252 uint16_t length() const { return *length_; }
117 uint16_t size() const { return sizeof(uint16_t) + *length_; }
118 34984 static uint16_t size(const uint16_t length) {
119 34984 return sizeof(uint16_t) + length;
120 }
121 252 char *data() const { return reinterpret_cast<char *>(length_ + 1); }
122 34984 static StringRef Place(const uint16_t length, const char *str, void *addr) {
123 34984 StringRef result;
124 34984 result.length_ = reinterpret_cast<uint16_t *>(addr);
125 34984 *result.length_ = length;
126
2/2
✓ Branch 0 taken 34908 times.
✓ Branch 1 taken 76 times.
34984 if (length > 0)
127 34908 memcpy(result.length_ + 1, str, length);
128 34984 return result;
129 }
130
131 private:
132 uint16_t *length_;
133 };
134
135
136 //------------------------------------------------------------------------------
137
138
139 /**
140 * Manages memory bins with immutable strings (deleting is a no-op).
141 * When the fraction of garbage is too large, the user of the StringHeap
142 * can copy the entire contents to a new heap.
143 */
144 class StringHeap : public SingleCopy {
145 public:
146 1073 StringHeap() {
147
1/2
✓ Branch 1 taken 1073 times.
✗ Branch 2 not taken.
1073 Init(128 * 1024); // 128kB (should be >= 64kB+2B which is largest string)
148 1073 }
149
150
1/2
✓ Branch 3 taken 127 times.
✗ Branch 4 not taken.
127 explicit StringHeap(const uint64_t minimum_size) { Init(minimum_size); }
151
152 1200 void Init(const uint64_t minimum_size) {
153 1200 size_ = 0;
154 1200 used_ = 0;
155
156 // Initial bin: 128kB or smallest power of 2 >= minimum size
157 1200 uint64_t pow2_size = 128 * 1024;
158
2/2
✓ Branch 0 taken 57 times.
✓ Branch 1 taken 1200 times.
1257 while (pow2_size < minimum_size)
159 57 pow2_size *= 2;
160 1200 AddBin(pow2_size);
161 1200 }
162
163 1197 ~StringHeap() {
164
2/2
✓ Branch 1 taken 1197 times.
✓ Branch 2 taken 1197 times.
2394 for (unsigned i = 0; i < bins_.size(); ++i) {
165 1197 smunmap(bins_.At(i));
166 }
167 1197 }
168
169 34984 StringRef AddString(const uint16_t length, const char *str) {
170 34984 const uint16_t str_size = StringRef::size(length);
171 34984 const uint64_t remaining_bin_size = bin_size_ - bin_used_;
172 // May require opening of new bin
173
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 34984 times.
34984 if (remaining_bin_size < str_size) {
174 size_ += remaining_bin_size;
175 AddBin(2 * bin_size_);
176 }
177 34984 StringRef result = StringRef::Place(
178 length, str,
179 34984 static_cast<char *>(bins_.At(bins_.size() - 1)) + bin_used_);
180 34984 size_ += str_size;
181 34984 used_ += str_size;
182 34984 bin_used_ += str_size;
183 34984 return result;
184 }
185
186 void RemoveString(const StringRef str_ref) { used_ -= str_ref.size(); }
187
188 double GetUsage() const {
189 if (size_ == 0)
190 return 1.0;
191 return static_cast<double>(used_) / static_cast<double>(size_);
192 }
193
194 uint64_t used() const { return used_; }
195
196 // mmap'd bytes, used for testing
197 169 uint64_t GetSizeAlloc() const {
198 169 uint64_t s = bin_size_;
199 169 uint64_t result = 0;
200
2/2
✓ Branch 1 taken 169 times.
✓ Branch 2 taken 169 times.
338 for (unsigned i = 0; i < bins_.size(); ++i) {
201 169 result += s;
202 169 s /= 2;
203 }
204 169 return result;
205 }
206
207 private:
208 1200 void AddBin(const uint64_t size) {
209 1200 void *bin = smmap(size);
210
1/2
✓ Branch 1 taken 1200 times.
✗ Branch 2 not taken.
1200 bins_.PushBack(bin);
211 1200 bin_size_ = size;
212 1200 bin_used_ = 0;
213 1200 }
214
215 uint64_t size_;
216 uint64_t used_;
217 uint64_t bin_size_;
218 uint64_t bin_used_;
219 BigVector<void *> bins_;
220 };
221
222
223 //------------------------------------------------------------------------------
224
225
226 class PathStore {
227 public:
228 /**
229 * Used to enumerate all paths
230 */
231 struct Cursor {
232 186 Cursor() : idx(0) { }
233 uint32_t idx;
234 };
235
236
237 1031 PathStore() {
238
3/6
✓ Branch 2 taken 1031 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1031 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1031 times.
✗ Branch 10 not taken.
1031 map_.Init(16, shash::Md5(shash::AsciiPtr("!")), hasher_md5);
239
2/4
✓ Branch 1 taken 1031 times.
✗ Branch 2 not taken.
✓ Branch 4 taken 1031 times.
✗ Branch 5 not taken.
1031 string_heap_ = new StringHeap();
240 1031 }
241
242
1/2
✓ Branch 0 taken 1029 times.
✗ Branch 1 not taken.
1029 ~PathStore() { delete string_heap_; }
243
244 explicit PathStore(const PathStore &other);
245 PathStore &operator=(const PathStore &other);
246
247 69892 void Insert(const shash::Md5 &md5path, const PathString &path) {
248
1/2
✓ Branch 1 taken 69892 times.
✗ Branch 2 not taken.
69892 PathInfo info;
249
1/2
✓ Branch 1 taken 69892 times.
✗ Branch 2 not taken.
69892 const bool found = map_.Lookup(md5path, &info);
250
2/2
✓ Branch 0 taken 34908 times.
✓ Branch 1 taken 34984 times.
69892 if (found) {
251 34908 info.refcnt++;
252
1/2
✓ Branch 1 taken 34908 times.
✗ Branch 2 not taken.
34908 map_.Insert(md5path, info);
253 34984 return;
254 }
255
256
1/2
✓ Branch 1 taken 34984 times.
✗ Branch 2 not taken.
34984 PathInfo new_entry;
257
3/4
✓ Branch 1 taken 34984 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 76 times.
✓ Branch 4 taken 34908 times.
34984 if (path.IsEmpty()) {
258
1/2
✓ Branch 1 taken 76 times.
✗ Branch 2 not taken.
76 new_entry.name = string_heap_->AddString(0, "");
259
1/2
✓ Branch 1 taken 76 times.
✗ Branch 2 not taken.
76 map_.Insert(md5path, new_entry);
260 76 return;
261 }
262
263
1/2
✓ Branch 1 taken 34908 times.
✗ Branch 2 not taken.
34908 const PathString parent_path = GetParentPath(path);
264
1/2
✓ Branch 3 taken 34908 times.
✗ Branch 4 not taken.
34908 new_entry.parent = shash::Md5(parent_path.GetChars(),
265 parent_path.GetLength());
266
1/2
✓ Branch 1 taken 34908 times.
✗ Branch 2 not taken.
34908 Insert(new_entry.parent, parent_path);
267
268 34908 const uint16_t name_length = path.GetLength() - parent_path.GetLength() - 1;
269 34908 const char *name_str = path.GetChars() + parent_path.GetLength() + 1;
270
1/2
✓ Branch 1 taken 34908 times.
✗ Branch 2 not taken.
34908 new_entry.name = string_heap_->AddString(name_length, name_str);
271
1/2
✓ Branch 1 taken 34908 times.
✗ Branch 2 not taken.
34908 map_.Insert(md5path, new_entry);
272 34908 }
273
274 126 bool Lookup(const shash::Md5 &md5path, PathString *path) {
275
1/2
✓ Branch 1 taken 126 times.
✗ Branch 2 not taken.
126 PathInfo info;
276
1/2
✓ Branch 1 taken 126 times.
✗ Branch 2 not taken.
126 bool retval = map_.Lookup(md5path, &info);
277
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 126 times.
126 if (!retval)
278 return false;
279
280
2/2
✓ Branch 1 taken 42 times.
✓ Branch 2 taken 84 times.
126 if (info.parent.IsNull())
281 42 return true;
282
283
1/2
✓ Branch 1 taken 84 times.
✗ Branch 2 not taken.
84 retval = Lookup(info.parent, path);
284
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 84 times.
84 assert(retval);
285
1/2
✓ Branch 1 taken 84 times.
✗ Branch 2 not taken.
84 path->Append("/", 1);
286
1/2
✓ Branch 3 taken 84 times.
✗ Branch 4 not taken.
84 path->Append(info.name.data(), info.name.length());
287 84 return true;
288 }
289
290 void Erase(const shash::Md5 &md5path) {
291 PathInfo info;
292 const bool found = map_.Lookup(md5path, &info);
293 if (!found)
294 return;
295
296 info.refcnt--;
297 if (info.refcnt == 0) {
298 map_.Erase(md5path);
299 string_heap_->RemoveString(info.name);
300 if (string_heap_->GetUsage() < 0.75) {
301 StringHeap *new_string_heap = new StringHeap(string_heap_->used());
302 const shash::Md5 empty_path = map_.empty_key();
303 for (unsigned i = 0; i < map_.capacity(); ++i) {
304 if (map_.keys()[i] != empty_path) {
305 (map_.values() + i)->name = new_string_heap->AddString(
306 map_.values()[i].name.length(), map_.values()[i].name.data());
307 }
308 }
309 delete string_heap_;
310 string_heap_ = new_string_heap;
311 }
312 Erase(info.parent);
313 } else {
314 map_.Insert(md5path, info);
315 }
316 }
317
318 void Clear() {
319 map_.Clear();
320 delete string_heap_;
321 string_heap_ = new StringHeap();
322 }
323
324 186 Cursor BeginEnumerate() { return Cursor(); }
325
326 294 bool Next(Cursor *cursor, shash::Md5 *parent, StringRef *name) {
327 294 const shash::Md5 empty_key = map_.empty_key();
328
2/2
✓ Branch 1 taken 1764 times.
✓ Branch 2 taken 126 times.
1890 while (cursor->idx < map_.capacity()) {
329
2/2
✓ Branch 2 taken 1596 times.
✓ Branch 3 taken 168 times.
1764 if (map_.keys()[cursor->idx] == empty_key) {
330 1596 cursor->idx++;
331 1596 continue;
332 }
333 168 *parent = map_.values()[cursor->idx].parent;
334 168 *name = map_.values()[cursor->idx].name;
335 168 cursor->idx++;
336 168 return true;
337 }
338 126 return false;
339 }
340
341 private:
342 struct PathInfo {
343 308009 PathInfo() { refcnt = 1; }
344 shash::Md5 parent;
345 uint32_t refcnt;
346 StringRef name;
347 };
348
349 void CopyFrom(const PathStore &other);
350
351 SmallHashDynamic<shash::Md5, PathInfo> map_;
352 StringHeap *string_heap_;
353 };
354
355
356 //------------------------------------------------------------------------------
357
358
359 /**
360 * A vector of stat structs. When removing items, the empty slot is swapped
361 * with the last element so that there are no gaps in the vector. The memory
362 * allocation of the vector grows and shrinks with the size.
363 * Removal of items returns the inode of the element swapped with the gap so
364 * that the page entry tracker can update its index.
365 */
366 class StatStore {
367 public:
368 44214 int32_t Add(const struct stat &info) {
369 // We don't support more that 2B open files
370
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 44214 times.
44214 assert(store_.size() < (1LU << 31));
371 44214 const int32_t index = static_cast<int>(store_.size());
372 44214 store_.PushBack(info);
373 44214 return index;
374 }
375
 376 // Note that if the last element is removed, no swap has taken place
377 44171 uint64_t Erase(int32_t index) {
378 44171 struct stat const info_back = store_.At(store_.size() - 1);
379 44171 store_.Replace(index, info_back);
380 44171 store_.SetSize(store_.size() - 1);
381
1/2
✓ Branch 1 taken 44171 times.
✗ Branch 2 not taken.
44171 store_.ShrinkIfOversized();
382 44171 return info_back.st_ino;
383 }
384
385 87866 struct stat Get(int32_t index) const { return store_.At(index); }
386
387 private:
388 BigVector<struct stat> store_;
389 };
390
391
392 //------------------------------------------------------------------------------
393
394
395 class PathMap {
396 public:
397
4/8
✓ Branch 2 taken 1031 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1031 times.
✗ Branch 7 not taken.
✓ Branch 10 taken 1031 times.
✗ Branch 11 not taken.
✓ Branch 13 taken 1031 times.
✗ Branch 14 not taken.
1031 PathMap() { map_.Init(16, shash::Md5(shash::AsciiPtr("!")), hasher_md5); }
398
399 42 bool LookupPath(const shash::Md5 &md5path, PathString *path) {
400 42 const bool found = path_store_.Lookup(md5path, path);
401 42 return found;
402 }
403
404 42 uint64_t LookupInodeByPath(const PathString &path) {
405 uint64_t inode;
406
1/2
✓ Branch 1 taken 42 times.
✗ Branch 2 not taken.
42 const bool found = map_.Lookup(
407
1/2
✓ Branch 3 taken 42 times.
✗ Branch 4 not taken.
42 shash::Md5(path.GetChars(), path.GetLength()), &inode);
408
1/2
✓ Branch 0 taken 42 times.
✗ Branch 1 not taken.
42 if (found)
409 42 return inode;
410 return 0;
411 }
412
413 126 uint64_t LookupInodeByMd5Path(const shash::Md5 &md5path) {
414 uint64_t inode;
415
1/2
✓ Branch 1 taken 126 times.
✗ Branch 2 not taken.
126 const bool found = map_.Lookup(md5path, &inode);
416
1/2
✓ Branch 0 taken 126 times.
✗ Branch 1 not taken.
126 if (found)
417 126 return inode;
418 return 0;
419 }
420
421 34984 shash::Md5 Insert(const PathString &path, const uint64_t inode) {
422 34984 shash::Md5 md5path(path.GetChars(), path.GetLength());
423
1/2
✓ Branch 1 taken 34984 times.
✗ Branch 2 not taken.
34984 if (!map_.Contains(md5path)) {
424 34984 path_store_.Insert(md5path, path);
425 34984 map_.Insert(md5path, inode);
426 }
427 34984 return md5path;
428 }
429
430 void Erase(const shash::Md5 &md5path) {
431 const bool found = map_.Contains(md5path);
432 if (found) {
433 path_store_.Erase(md5path);
434 map_.Erase(md5path);
435 }
436 }
437
438 void Replace(const shash::Md5 &md5path, uint64_t new_inode) {
439 map_.Insert(md5path, new_inode);
440 }
441
442 void Clear() {
443 map_.Clear();
444 path_store_.Clear();
445 }
446
447 // For enumerating
448 480 PathStore *path_store() { return &path_store_; }
449
450 private:
451 SmallHashDynamic<shash::Md5, uint64_t> map_;
452 PathStore path_store_;
453 };
454
455
456 //------------------------------------------------------------------------------
457
458
459 /**
460 * This class has the same memory layout than the previous "InodeMap" class,
461 * therefore there is no data structure migration during reload required.
462 */
463 class InodeExMap {
464 public:
465
1/2
✓ Branch 3 taken 1031 times.
✗ Branch 4 not taken.
1031 InodeExMap() { map_.Init(16, InodeEx(), hasher_inode_ex); }
466
467 84 bool LookupMd5Path(InodeEx *inode_ex, shash::Md5 *md5path) {
468 84 const bool found = map_.LookupEx(inode_ex, md5path);
469 84 return found;
470 }
471
472 34984 void Insert(const InodeEx inode_ex, const shash::Md5 &md5path) {
473 34984 map_.Insert(inode_ex, md5path);
474 34984 }
475
476 void Erase(const uint64_t inode) {
477 map_.Erase(InodeEx(inode, InodeEx::kUnknownType));
478 }
479
480 void Clear() { map_.Clear(); }
481
482 private:
483 SmallHashDynamic<InodeEx, shash::Md5> map_;
484 };
485
486
487 //------------------------------------------------------------------------------
488
489
490 class InodeReferences {
491 public:
492 /**
493 * Used to enumerate all inodes
494 */
495 struct Cursor {
496 186 Cursor() : idx(0) { }
497 uint32_t idx;
498 };
499
500
1/2
✓ Branch 2 taken 1031 times.
✗ Branch 3 not taken.
1031 InodeReferences() { map_.Init(16, 0, hasher_inode); }
501
502 34984 bool Get(const uint64_t inode, const uint32_t by) {
503 34984 uint32_t refcounter = 0;
504
1/2
✓ Branch 1 taken 34984 times.
✗ Branch 2 not taken.
34984 const bool found = map_.Lookup(inode, &refcounter);
505 34984 const bool new_inode = !found;
506 34984 refcounter += by; // This is 0 if the inode is not found
507
1/2
✓ Branch 1 taken 34984 times.
✗ Branch 2 not taken.
34984 map_.Insert(inode, refcounter);
508 34984 return new_inode;
509 }
510
511 bool Put(const uint64_t inode, const uint32_t by) {
512 uint32_t refcounter;
513 const bool found = map_.Lookup(inode, &refcounter);
514 if (!found) {
515 // May happen if a retired inode is cleared, i.e. if a file with
516 // outdated content is closed
517 return false;
518 }
519
520 if (refcounter < by) {
521 PANIC(kLogSyslogErr | kLogDebug,
522 "inode tracker refcount mismatch, inode % " PRIu64
523 ", refcounts %u / %u",
524 inode, refcounter, by);
525 }
526
527 if (refcounter == by) {
528 map_.Erase(inode);
529 return true;
530 }
531 refcounter -= by;
532 map_.Insert(inode, refcounter);
533 return false;
534 }
535
536 void Replace(const uint64_t old_inode, const uint64_t new_inode) {
537 map_.Erase(old_inode);
538 map_.Insert(new_inode, 0);
539 }
540
541 void Clear() { map_.Clear(); }
542
543 186 Cursor BeginEnumerate() { return Cursor(); }
544
545 104802 bool Next(Cursor *cursor, uint64_t *inode) {
546 104802 const uint64_t empty_key = map_.empty_key();
547
2/2
✓ Branch 1 taken 275940 times.
✓ Branch 2 taken 186 times.
276126 while (cursor->idx < map_.capacity()) {
548
2/2
✓ Branch 1 taken 171324 times.
✓ Branch 2 taken 104616 times.
275940 if (map_.keys()[cursor->idx] == empty_key) {
549 171324 cursor->idx++;
550 171324 continue;
551 }
552 104616 *inode = map_.keys()[cursor->idx];
553 104616 cursor->idx++;
554 104616 return true;
555 }
556 186 return false;
557 }
558
559 private:
560 SmallHashDynamic<uint64_t, uint32_t> map_;
561 };
562
563
564 //------------------------------------------------------------------------------
565
566
567 /**
568 * Tracks inode reference counters as given by Fuse.
569 */
570 class InodeTracker {
571 public:
572 /**
573 * Used to actively evict all known paths from kernel caches
574 */
575 struct Cursor {
576 186 explicit Cursor(const PathStore::Cursor &p,
577 const InodeReferences::Cursor &i)
578 186 : csr_paths(p), csr_inos(i) { }
579 PathStore::Cursor csr_paths;
580 InodeReferences::Cursor csr_inos;
581 };
582
583 /**
584 * To avoid taking the InodeTracker mutex multiple times, the fuse
585 * forget_multi callback releases inodes references through this RAII object.
586 * Copy and assign operator should be deleted but that would require
587 * all compilers to use RVO. TODO(jblomer): fix with C++11
588 */
589 class VfsPutRaii {
590 public:
591 explicit VfsPutRaii(InodeTracker *t) : tracker_(t) { tracker_->Lock(); }
592 ~VfsPutRaii() { tracker_->Unlock(); }
593
594 bool VfsPut(const uint64_t inode, const uint32_t by) {
595 const bool removed = tracker_->inode_references_.Put(inode, by);
596 if (removed) {
597 // TODO(jblomer): pop operation (Lookup+Erase)
598 shash::Md5 md5path;
599 InodeEx inode_ex(inode, InodeEx::kUnknownType);
600 const bool found = tracker_->inode_ex_map_.LookupMd5Path(&inode_ex,
601 &md5path);
602 if (!found) {
603 PANIC(kLogSyslogErr | kLogDebug,
604 "inode tracker ref map and path map out of sync: %" PRIu64,
605 inode);
606 }
607 tracker_->inode_ex_map_.Erase(inode);
608 tracker_->path_map_.Erase(md5path);
609 atomic_inc64(&tracker_->statistics_.num_removes);
610 }
611 atomic_xadd64(&tracker_->statistics_.num_references, -int32_t(by));
612 return removed;
613 }
614
615 private:
616 InodeTracker *tracker_;
617 };
618
619 // Cannot be moved to the statistics manager because it has to survive
620 // reloads. Added manually in the fuse module initialization and in talk.cc.
621 struct Statistics {
622 1031 Statistics() {
623 1031 atomic_init64(&num_inserts);
624 1031 atomic_init64(&num_removes);
625 1031 atomic_init64(&num_references);
626 1031 atomic_init64(&num_hits_inode);
627 1031 atomic_init64(&num_hits_path);
628 1031 atomic_init64(&num_misses_path);
629 1031 }
630 std::string Print() {
631 return "inserts: " + StringifyInt(atomic_read64(&num_inserts))
632 + " removes: " + StringifyInt(atomic_read64(&num_removes))
633 + " references: " + StringifyInt(atomic_read64(&num_references))
634 + " hits(inode): " + StringifyInt(atomic_read64(&num_hits_inode))
635 + " hits(path): " + StringifyInt(atomic_read64(&num_hits_path))
636 + " misses(path): "
637 + StringifyInt(atomic_read64(&num_misses_path));
638 }
639 atomic_int64 num_inserts;
640 atomic_int64 num_removes;
641 atomic_int64 num_references;
642 atomic_int64 num_hits_inode;
643 atomic_int64 num_hits_path;
644 atomic_int64 num_misses_path;
645 };
646 Statistics GetStatistics() { return statistics_; }
647
648 InodeTracker();
649 explicit InodeTracker(const InodeTracker &other);
650 InodeTracker &operator=(const InodeTracker &other);
651 ~InodeTracker();
652
653 34984 void VfsGetBy(const InodeEx inode_ex, const uint32_t by,
654 const PathString &path) {
655 34984 const uint64_t inode = inode_ex.GetInode();
656 34984 Lock();
657
1/2
✓ Branch 1 taken 34984 times.
✗ Branch 2 not taken.
34984 const bool is_new_inode = inode_references_.Get(inode, by);
658
1/2
✓ Branch 1 taken 34984 times.
✗ Branch 2 not taken.
34984 const shash::Md5 md5path = path_map_.Insert(path, inode);
659
1/2
✓ Branch 1 taken 34984 times.
✗ Branch 2 not taken.
34984 inode_ex_map_.Insert(inode_ex, md5path);
660 34984 Unlock();
661
662 34984 atomic_xadd64(&statistics_.num_references, by);
663
1/2
✓ Branch 0 taken 34984 times.
✗ Branch 1 not taken.
34984 if (is_new_inode)
664 34984 atomic_inc64(&statistics_.num_inserts);
665 34984 }
666
667 34984 void VfsGet(const InodeEx inode_ex, const PathString &path) {
668 34984 VfsGetBy(inode_ex, 1, path);
669 34984 }
670
671 VfsPutRaii GetVfsPutRaii() { return VfsPutRaii(this); }
672
673 bool FindPath(InodeEx *inode_ex, PathString *path) {
674 Lock();
675 shash::Md5 md5path;
676 bool found = inode_ex_map_.LookupMd5Path(inode_ex, &md5path);
677 if (found) {
678 found = path_map_.LookupPath(md5path, path);
679 assert(found);
680 }
681 Unlock();
682
683 if (found) {
684 atomic_inc64(&statistics_.num_hits_path);
685 } else {
686 atomic_inc64(&statistics_.num_misses_path);
687 }
688 return found;
689 }
690
691 uint64_t FindInode(const PathString &path) {
692 Lock();
693 const uint64_t inode = path_map_.LookupInodeByPath(path);
694 Unlock();
695 atomic_inc64(&statistics_.num_hits_inode);
696 return inode;
697 }
698
699 84 bool FindDentry(uint64_t ino, uint64_t *parent_ino, NameString *name) {
700 84 PathString path;
701 84 InodeEx inodex(ino, InodeEx::kUnknownType);
702
1/2
✓ Branch 1 taken 84 times.
✗ Branch 2 not taken.
84 shash::Md5 md5path;
703
704 84 Lock();
705
1/2
✓ Branch 1 taken 84 times.
✗ Branch 2 not taken.
84 bool found = inode_ex_map_.LookupMd5Path(&inodex, &md5path);
706
2/2
✓ Branch 0 taken 42 times.
✓ Branch 1 taken 42 times.
84 if (found) {
707
1/2
✓ Branch 1 taken 42 times.
✗ Branch 2 not taken.
42 found = path_map_.LookupPath(md5path, &path);
708
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 42 times.
42 assert(found);
709
2/4
✓ Branch 1 taken 42 times.
✗ Branch 2 not taken.
✓ Branch 4 taken 42 times.
✗ Branch 5 not taken.
42 *name = GetFileName(path);
710
2/4
✓ Branch 1 taken 42 times.
✗ Branch 2 not taken.
✓ Branch 4 taken 42 times.
✗ Branch 5 not taken.
42 path = GetParentPath(path);
711
1/2
✓ Branch 1 taken 42 times.
✗ Branch 2 not taken.
42 *parent_ino = path_map_.LookupInodeByPath(path);
712 }
713 84 Unlock();
714 84 return found;
715 84 }
716
717 /**
718 * The new inode has reference counter 0. Returns true if the inode was
719 * found and replaced
720 */
721 bool ReplaceInode(uint64_t old_inode, const InodeEx &new_inode) {
722 shash::Md5 md5path;
723 InodeEx old_inode_ex(old_inode, InodeEx::kUnknownType);
724 Lock();
725 const bool found = inode_ex_map_.LookupMd5Path(&old_inode_ex, &md5path);
726 if (found) {
727 inode_references_.Replace(old_inode, new_inode.GetInode());
728 path_map_.Replace(md5path, new_inode.GetInode());
729 inode_ex_map_.Erase(old_inode);
730 inode_ex_map_.Insert(new_inode, md5path);
731 }
732 Unlock();
733 return found;
734 }
735
736 186 Cursor BeginEnumerate() {
737 186 Lock();
738 186 return Cursor(path_map_.path_store()->BeginEnumerate(),
739 372 inode_references_.BeginEnumerate());
740 }
741
742 294 bool NextEntry(Cursor *cursor, uint64_t *inode_parent, NameString *name) {
743
1/2
✓ Branch 1 taken 294 times.
✗ Branch 2 not taken.
294 shash::Md5 parent_md5;
744 294 StringRef name_ref;
745
1/2
✓ Branch 2 taken 294 times.
✗ Branch 3 not taken.
294 const bool result = path_map_.path_store()->Next(&(cursor->csr_paths),
746 &parent_md5, &name_ref);
747
2/2
✓ Branch 0 taken 126 times.
✓ Branch 1 taken 168 times.
294 if (!result)
748 126 return false;
749
2/2
✓ Branch 1 taken 42 times.
✓ Branch 2 taken 126 times.
168 if (parent_md5.IsNull())
750 42 *inode_parent = 0;
751 else
752
1/2
✓ Branch 1 taken 126 times.
✗ Branch 2 not taken.
126 *inode_parent = path_map_.LookupInodeByMd5Path(parent_md5);
753
1/2
✓ Branch 3 taken 168 times.
✗ Branch 4 not taken.
168 name->Assign(name_ref.data(), name_ref.length());
754 168 return true;
755 }
756
757 104802 bool NextInode(Cursor *cursor, uint64_t *inode) {
758 104802 return inode_references_.Next(&(cursor->csr_inos), inode);
759 }
760
761 186 void EndEnumerate(Cursor *cursor) { Unlock(); }
762
763 private:
764 static const unsigned kVersion = 4;
765
766 void InitLock();
767 void CopyFrom(const InodeTracker &other);
768 35254 inline void Lock() const {
769 35254 const int retval = pthread_mutex_lock(lock_);
770
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 35254 times.
35254 assert(retval == 0);
771 35254 }
772 35254 inline void Unlock() const {
773 35254 const int retval = pthread_mutex_unlock(lock_);
774
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 35254 times.
35254 assert(retval == 0);
775 35254 }
776
777 unsigned version_;
778 pthread_mutex_t *lock_;
779 PathMap path_map_;
780 InodeExMap inode_ex_map_;
781 InodeReferences inode_references_;
782 Statistics statistics_;
783 }; // class InodeTracker
784
785
786 /**
787 * Tracks fuse name lookup replies for active cache eviction.
788 * Class renamed from previous name NentryTracker
789 */
790 class DentryTracker {
791 FRIEND_TEST(T_GlueBuffer, DentryTracker);
792
793 private:
794 struct Entry {
795 Entry() : expiry(0), inode_parent(0) { }
796 69806 Entry(uint64_t e, uint64_t p, const char *n)
797 69806 : expiry(e), inode_parent(p), name(n, strlen(n)) { }
798 uint64_t expiry;
799 uint64_t inode_parent;
800 NameString name;
801 };
802
803 public:
804 struct Cursor {
805 406 explicit Cursor(Entry *h) : head(h), pos(0) { }
806 Entry *head;
807 size_t pos;
808 };
809
810 // Cannot be moved to the statistics manager because it has to survive
811 // reloads. Added manually in the fuse module initialization and in talk.cc.
812 struct Statistics {
813 829 Statistics() : num_insert(0), num_remove(0), num_prune(0) { }
814 int64_t num_insert;
815 int64_t num_remove;
816 int64_t num_prune;
817 };
818 264 Statistics GetStatistics() { return statistics_; }
819
820 static void *MainCleaner(void *data);
821
822 DentryTracker();
823 DentryTracker(const DentryTracker &other);
824 DentryTracker &operator=(const DentryTracker &other);
825 ~DentryTracker();
826
827 /**
828 * Lock object during copy
829 */
830 DentryTracker *Move();
831
832 69894 void Add(const uint64_t inode_parent, const char *name, uint64_t timeout_s) {
833
2/2
✓ Branch 0 taken 44 times.
✓ Branch 1 taken 69850 times.
69894 if (!is_active_)
834 44 return;
835
2/2
✓ Branch 0 taken 44 times.
✓ Branch 1 taken 69806 times.
69850 if (timeout_s == 0)
836 44 return;
837
838 69806 const uint64_t now = platform_monotonic_time();
839 69806 Lock();
840
1/2
✓ Branch 2 taken 69806 times.
✗ Branch 3 not taken.
69806 entries_.PushBack(Entry(now + timeout_s, inode_parent, name));
841 69806 statistics_.num_insert++;
842 69806 DoPrune(now);
843 69806 Unlock();
844 }
845
846 void Prune();
847 /**
 848 * The dentry tracker is only needed for active cache eviction and can
849 * otherwise ignore new entries.
850 */
851 44 void Disable() { is_active_ = false; }
852 bool is_active() const { return is_active_; }
853
854 void SpawnCleaner(unsigned interval_s);
855
856 Cursor BeginEnumerate();
857 bool NextEntry(Cursor *cursor, uint64_t *inode_parent, NameString *name);
858 void EndEnumerate(Cursor *cursor);
859
860 private:
861 static const unsigned kVersion = 0;
862
863 void CopyFrom(const DentryTracker &other);
864
865 void InitLock();
866 70634 inline void Lock() const {
867 70634 const int retval = pthread_mutex_lock(lock_);
868
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 70634 times.
70634 assert(retval == 0);
869 70634 }
870 70634 inline void Unlock() const {
871 70634 const int retval = pthread_mutex_unlock(lock_);
872
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 70634 times.
70634 assert(retval == 0);
873 70634 }
874
875 70128 void DoPrune(uint64_t now) {
876 Entry *entry;
877
3/4
✓ Branch 1 taken 70260 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 70050 times.
✓ Branch 4 taken 210 times.
70260 while (entries_.Peek(&entry)) {
878
2/2
✓ Branch 0 taken 69918 times.
✓ Branch 1 taken 132 times.
70050 if (entry->expiry >= now)
879 69918 break;
880
1/2
✓ Branch 1 taken 132 times.
✗ Branch 2 not taken.
132 entries_.PopFront();
881 132 statistics_.num_remove++;
882 }
883 70128 statistics_.num_prune++;
884 70128 }
885
886 pthread_mutex_t *lock_;
887 unsigned version_;
888 Statistics statistics_;
889 bool is_active_;
890 BigQueue<Entry> entries_;
891
892 int pipe_terminate_[2];
893 int cleaning_interval_ms_;
894 pthread_t thread_cleaner_;
895 }; // class DentryTracker
896
897 /**
898 * Tracks the content hash associated to inodes of regular files whose content
899 * may be in the page cache. It is used in cvmfs_open() and cvmfs_close().
900 */
class PageCacheTracker {
 private:
  /**
   * Bookkeeping record for a single inode: the open reference count, an
   * index into the stat store, and the content hash believed to be in the
   * page cache.
   */
  struct Entry {
    Entry() : nopen(0), idx_stat(-1) { }
    Entry(int32_t n, int32_t i, const shash::Any &h)
      : nopen(n), idx_stat(i), hash(h) { }
    /**
     * Reference counter for currently open files with a given inode. If the
     * sign bit is set, the entry is in the transition phase from one hash to
     * another. The sign will be cleared on Close() in this case.
     */
    int32_t nopen;
    /**
     * Points into the list of stat structs; >= 0 only for open files.
     */
    int32_t idx_stat;
    /**
     * The content hash of the data stored in the page cache. For chunked
     * files, hash contains an artificial hash over all the chunk hash
     * values.
     */
    shash::Any hash;
  };

 public:
  /**
   * In the fuse file handle, use bit 62 to indicate that the file was opened
   * with a direct I/O setting and cvmfs_release() should not call Close().
   * Note that the sign bit (bit 63) indicates chunked files.
   */
  static const unsigned int kBitDirectIo = 62;

  /**
   * Instruct cvmfs_open() on how to handle the page cache.
   */
  struct OpenDirectives {
    /**
     * Flush the page cache; logically, the flush takes place some time
     * between cvmfs_open() and cvmfs_close(). That's important in case we
     * have two open() calls on stale page cache data.
     */
    bool keep_cache;
    /**
     * Don't use the page cache at all (neither write nor read). If this is
     * set on cvmfs_open(), don't call Close() on cvmfs_close().
     * Direct I/O prevents shared mmap on the file. Private mmap, however,
     * which includes loading binaries, still works.
     */
    bool direct_io;

    // Defaults to the old (pre v2.10) behavior: always flush the cache,
    // never use direct I/O.
    OpenDirectives() : keep_cache(false), direct_io(false) { }

    OpenDirectives(bool k, bool d) : keep_cache(k), direct_io(d) { }
  };

  /**
   * To avoid taking the PageCacheTracker mutex multiple times, the
   * fuse forget_multi callback evicts inodes through this RAII object.
   * Copy and assign operator should be deleted but that would require
   * all compilers to use RVO. TODO(jblomer): fix with C++11
   */
  class EvictRaii {
   public:
    explicit EvictRaii(PageCacheTracker *t);
    ~EvictRaii();
    void Evict(uint64_t inode);

   private:
    PageCacheTracker *tracker_;
  };

  // Cannot be moved to the statistics manager because it has to survive
  // reloads. Added manually in the fuse module initialization and in
  // talk.cc.
  struct Statistics {
    Statistics()
      : n_insert(0)
      , n_remove(0)
      , n_open_direct(0)
      , n_open_flush(0)
      , n_open_cached(0) { }
    uint64_t n_insert;
    uint64_t n_remove;
    uint64_t n_open_direct;
    uint64_t n_open_flush;
    uint64_t n_open_cached;
  };
  // NOTE(review): returns a snapshot without taking lock_; concurrent
  // updates may yield torn counter values — confirm this is acceptable for
  // statistics reporting.
  Statistics GetStatistics() { return statistics_; }

  PageCacheTracker();
  explicit PageCacheTracker(const PageCacheTracker &other);
  PageCacheTracker &operator=(const PageCacheTracker &other);
  ~PageCacheTracker();

  OpenDirectives Open(uint64_t inode, const shash::Any &hash,
                      const struct stat &info);
  /**
   * Forced direct I/O open. Used when the corresponding flag is set in the
   * file catalogs. In this case, we don't need to track the inode.
   */
  OpenDirectives OpenDirect();
  void Close(uint64_t inode);

  /**
   * If the inode is tracked and currently open (nopen != 0), stores its
   * content hash in *hash and, unless info is NULL, its cached stat
   * structure in *info.  Returns true in that case, false otherwise.
   * Note that for an inode marked stale by IsStale(), the stored hash is
   * the null hash (see IsStale()).
   */
  bool GetInfoIfOpen(uint64_t inode, shash::Any *hash, struct stat *info) {
    const MutexLockGuard guard(lock_);
    Entry entry;
    const bool retval = map_.Lookup(inode, &entry);
    if (retval && (entry.nopen != 0)) {
      // Open files always carry a valid index into stat_store_
      assert(entry.idx_stat >= 0);
      *hash = entry.hash;
      if (info != NULL)
        *info = stat_store_.Get(entry.idx_stat);
      return true;
    }
    return false;
  }

  /**
   * Checks if the dirent's inode is registered in the page cache tracker and
   * if
   * - it is currently open and has a different content than dirent
   * - it has been previously found stale (no matter if now open or not)
   */
  bool IsStale(const catalog::DirectoryEntry &dirent) {
    Entry entry;
    const MutexLockGuard guard(lock_);

    const bool retval = map_.Lookup(dirent.inode(), &entry);
    if (!retval)
      return false;
    if (entry.hash.IsNull()) {
      // A previous call to IsStale() returned true (see below)
      return true;
    }
    if (entry.nopen == 0)
      return false;
    if (entry.hash == dirent.checksum())
      return false;

    bool is_stale = true;
    if (dirent.IsChunkedFile()) {
      // Shortcut for chunked files: go by last modified timestamp
      is_stale = stat_store_.Get(entry.idx_stat).st_mtime != dirent.mtime();
    }
    if (is_stale) {
      // We mark that inode as "stale" by setting its hash to NULL.
      // When we check next time IsStale(), it is returned stale even
      // if it is not open.
      // The call to GetInfoIfOpen() will from now on return the null hash.
      // That works, the caller will still assume that the version in the
      // page cache tracker is different from any inode in the catalogs.
      entry.hash = shash::Any();
      map_.Insert(dirent.inode(), entry);
    }
    return is_stale;
  }

  // Batch eviction: the returned RAII object holds the tracker mutex once
  // for the whole batch instead of once per inode.
  EvictRaii GetEvictRaii() { return EvictRaii(this); }

  // Used in RestoreState to prevent using the page cache tracker from a
  // previous version after hotpatch
  void Disable() { is_active_ = false; }

 private:
  static const unsigned kVersion = 0;

  void InitLock();
  void CopyFrom(const PageCacheTracker &other);

  pthread_mutex_t *lock_;
  unsigned version_;
  /**
   * The page cache tracker only works correctly if it is used from the start
   * of the mount. If the instance is hot-patched from a previous version,
   * the page cache tracker remains turned off.
   */
  bool is_active_;
  Statistics statistics_;
  SmallHashDynamic<uint64_t, Entry> map_;
  StatStore stat_store_;
};
1082
1083
1084 } // namespace glue
1085
1086 #endif // CVMFS_GLUE_BUFFER_H_
1087