GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/compat.h
Date: 2025-07-13 02:35:07
            Exec   Total   Coverage
Lines:         0      95       0.0%
Branches:      0      74       0.0%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 *
4 * A mediator to transform old data structures into new ones on reload
5 */
6
7 #ifndef CVMFS_COMPAT_H_
8 #define CVMFS_COMPAT_H_
9
10 #include <pthread.h>
11 #include <sched.h>
12 #include <stdint.h>
13 #include <string.h>  // memset, memcpy
14 #include <cassert>
15 #include <google/sparse_hash_map>
16 #include <string>
17
18 #include "bigvector.h"
19 #include "catalog_mgr.h"
20 #include "crypto/hash.h"
21 #include "file_chunk.h"
22 #include "glue_buffer.h"
23 #include "shortstring.h"
24 #include "util/algorithm.h"
25 #include "util/atomic.h"
26
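// ---------------------------------------------------------------------------
// Editor's sketch (not part of the measured file; illustrative only): the
// namespaces below are frozen copies of old layouts. On a fuse module reload,
// the loader is assumed to hand the deserialized old structure to the
// matching Migrate() function, along these lines:
//
//   compat::inode_tracker::InodeTracker *old_tracker = ...;  // saved state
//   glue::InodeTracker *new_tracker = new glue::InodeTracker();
//   compat::inode_tracker::Migrate(old_tracker, new_tracker);
//   delete old_tracker;  // contents now live in the new structure
// ---------------------------------------------------------------------------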
27 namespace compat {
28
29 namespace shash_v1 {
30
31 enum Algorithms {
32 kMd5 = 0,
33 kSha1,
34 kRmd160,
35 kAny,
36 };
37 const unsigned kDigestSizes[] = {16, 20, 20, 20};
38 const unsigned kMaxDigestSize = 20;
39 extern const char *kSuffixes[];
40 const unsigned kSuffixLengths[] = {0, 0, 7, 0};
41 const unsigned kMaxSuffixLength = 7;
42
43 template<unsigned digest_size_, Algorithms algorithm_>
44 struct Digest {
45 unsigned char digest[digest_size_];
46 Algorithms algorithm;
47
48 unsigned GetDigestSize() const { return kDigestSizes[algorithm]; }
49 unsigned GetHexSize() const {
50 return 2 * kDigestSizes[algorithm] + kSuffixLengths[algorithm];
51 }
52
53 Digest() {
54 algorithm = algorithm_;
55 memset(digest, 0, digest_size_);
56 }
57
58 Digest(const Algorithms a, const unsigned char *digest_buffer,
59 const unsigned buffer_size) {
60 algorithm = a;
61 assert(buffer_size <= digest_size_);
62 memcpy(digest, digest_buffer, buffer_size);
63 }
64
65 std::string ToString() const {
66 const unsigned string_length = GetHexSize();
67 std::string result(string_length, 0);
68
69 unsigned i;
70 for (i = 0; i < kDigestSizes[algorithm]; ++i) {
71 char dgt1 = (unsigned)digest[i] / 16;
72 char dgt2 = (unsigned)digest[i] % 16;
73 dgt1 += (dgt1 <= 9) ? '0' : 'a' - 10;
74 dgt2 += (dgt2 <= 9) ? '0' : 'a' - 10;
75 result[i * 2] = dgt1;
76 result[i * 2 + 1] = dgt2;
77 }
78 unsigned pos = i * 2;
79 for (const char *s = kSuffixes[algorithm]; *s != '\0'; ++s) {
80 result[pos] = *s;
81 pos++;
82 }
83 return result;
84 }
85
86 bool IsNull() const {
87 for (unsigned i = 0; i < kDigestSizes[algorithm]; ++i)
88 if (digest[i] != 0)
89 return false;
90 return true;
91 }
92
93 bool operator==(const Digest<digest_size_, algorithm_> &other) const {
94 if (this->algorithm != other.algorithm)
95 return false;
96 for (unsigned i = 0; i < kDigestSizes[algorithm]; ++i)
97 if (this->digest[i] != other.digest[i])
98 return false;
99 return true;
100 }
101
102 bool operator!=(const Digest<digest_size_, algorithm_> &other) const {
103 return !(*this == other);
104 }
105
106 bool operator<(const Digest<digest_size_, algorithm_> &other) const {
107 if (this->algorithm != other.algorithm)
108 return (this->algorithm < other.algorithm);
109 for (unsigned i = 0; i < kDigestSizes[algorithm]; ++i) {
110 if (this->digest[i] > other.digest[i])
111 return false;
112 if (this->digest[i] < other.digest[i])
113 return true;
114 }
115 return false;
116 }
117
118 bool operator>(const Digest<digest_size_, algorithm_> &other) const {
119 if (this->algorithm != other.algorithm)
120 return (this->algorithm > other.algorithm);
121 for (unsigned i = 0; i < kDigestSizes[algorithm]; ++i) {
122 if (this->digest[i] < other.digest[i])
123 return false;
124 if (this->digest[i] > other.digest[i])
125 return true;
126 }
127 return false;
128 }
129 };
130
131 struct Md5 : public Digest<16, kMd5> {
132 Md5() : Digest<16, kMd5>() { }
133 Md5(const char *chars, const unsigned length);
134 };
135
136 struct Any : public Digest<20, kAny> {
137 Any() : Digest<20, kAny>() { }
138 };
139
140 void MigrateAny(const Any *old_hash, shash::Any *new_hash);
141
142 } // namespace shash_v1
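// Editor's sketch (not part of the measured file): ToString() above maps each
// digest byte to two lowercase hex characters via nibble arithmetic, e.g.
// digest[i] == 0xAB yields dgt1 = 0xAB / 16 = 10 -> 'a' and
// dgt2 = 0xAB % 16 = 11 -> 'b'. A minimal standalone equivalent:
//
//   #include <string>
//   std::string ByteToHex(unsigned char b) {
//     const char *tab = "0123456789abcdef";
//     std::string s(2, '0');
//     s[0] = tab[b / 16];  // high nibble
//     s[1] = tab[b % 16];  // low nibble
//     return s;            // ByteToHex(0xAB) == "ab"
//   }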
143
144
145 //------------------------------------------------------------------------------
146
147
148 namespace shash_v2 {
149
150 enum Algorithms {
151 kMd5 = 0,
152 kSha1,
153 kRmd160,
154 kAny,
155 };
156 const unsigned kDigestSizes[] = {16, 20, 20, 20};
157 const unsigned kMaxDigestSize = 20;
158 extern const char *kAlgorithmIds[];
159 const unsigned kAlgorithmIdSizes[] = {0, 0, 7, 0};
160 const unsigned kMaxAlgorithmIdentifierSize = 7;
161 typedef char Suffix;
162 const char kSuffixNone = 0;
163 const char kSuffixCatalog = 'C';
164 const char kSuffixHistory = 'H';
165 const char kSuffixMicroCatalog = 'L'; // currently unused
166 const char kSuffixPartial = 'P';
167 const char kSuffixTemporary = 'T';
168 const char kSuffixCertificate = 'X';
169
170 template<unsigned digest_size_, Algorithms algorithm_>
171 struct Digest {
172 unsigned char digest[digest_size_];
173 Algorithms algorithm;
174 Suffix suffix;
175
176 unsigned GetDigestSize() const { return kDigestSizes[algorithm]; }
177 unsigned GetHexSize() const {
178 return 2 * kDigestSizes[algorithm] + kAlgorithmIdSizes[algorithm];
179 }
180
181 Digest() : algorithm(algorithm_), suffix(kSuffixNone) {
182 memset(digest, 0, digest_size_);
183 }
184
185 Digest(const Algorithms a, const unsigned char *digest_buffer,
186 const unsigned buffer_size, const Suffix s = kSuffixNone)
187 : algorithm(a), suffix(s) {
188 assert(buffer_size <= digest_size_);
189 memcpy(digest, digest_buffer, buffer_size);
190 }
191 };
192
193 struct Any : public Digest<20, kAny> {
194 Any() : Digest<20, kAny>() { }
195 };
196
197 void MigrateAny(const Any *old_hash, shash::Any *new_hash);
198
199 } // namespace shash_v2
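// Editor's sketch (not part of the measured file): compared to shash_v1, the
// v2 Digest carries a one-character Suffix tagging the object type in storage
// ('C' catalog, 'H' history, 'X' certificate, ...). A migration call is
// assumed to look like:
//
//   compat::shash_v2::Any old_hash;  // e.g. from deserialized saved state
//   old_hash.suffix = compat::shash_v2::kSuffixCatalog;
//   shash::Any new_hash;
//   compat::shash_v2::MigrateAny(&old_hash, &new_hash);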
200
201
202 //------------------------------------------------------------------------------
203
204
205 namespace inode_tracker {
206
207 struct Dirent {
208 Dirent() { parent_inode = 0; }
209 Dirent(const uint64_t p, const NameString &n) {
210 parent_inode = p;
211 name = n;
212 references = 1;
213 }
214 uint32_t references;
215 uint64_t parent_inode;
216 NameString name;
217 };
218
219
220 class InodeContainer {
221 public:
222 typedef google::sparse_hash_map<uint64_t, Dirent, hash_murmur<uint64_t> >
223 InodeMap;
224
225 InodeContainer() { assert(false); }
226 bool Add(const uint64_t inode, const uint64_t parent_inode,
227 const NameString &name) {
228 assert(false);
229 return false;
230 }
231 bool Get(const uint64_t inode, const uint64_t parent_inode,
232 const NameString &name) {
233 assert(false);
234 return false;
235 }
236 uint32_t Put(const uint64_t inode, const uint32_t by) {
237 assert(false);
238 return 0;
239 }
240 bool ConstructPath(const uint64_t inode, PathString *path);
241 bool Contains(const uint64_t inode) { return map_.find(inode) != map_.end(); }
242 inline size_t Size() { return map_.size(); }
243 // private:
244 std::string DebugPrint() {
245 assert(false);
246 return "";
247 }
248 InodeMap map_;
249 };
250
251
252 /**
253 * Tracks inode reference counters as given by Fuse.
254 */
255 class InodeTracker {
256 public:
257 struct Statistics {
258 Statistics() { assert(false); }
259 std::string Print() {
260 assert(false);
261 return "";
262 }
263 atomic_int64 num_inserts;
264 atomic_int64 num_dangling_try;
265 atomic_int64 num_double_add;
266 atomic_int64 num_removes;
267 atomic_int64 num_references;
268 atomic_int64 num_ancient_hits;
269 atomic_int64 num_ancient_misses;
270 };
271 Statistics GetStatistics() { return statistics_; }
272
273 InodeTracker() { assert(false); }
274 explicit InodeTracker(const InodeTracker &other) { assert(false); }
275 InodeTracker &operator=(const InodeTracker &other) { assert(false); return *this; }
276 ~InodeTracker();
277
278 bool VfsGet(const uint64_t inode, const uint64_t parent_inode,
279 const NameString &name) {
280 assert(false);
281 return false;
282 }
283 bool VfsAdd(const uint64_t inode, const uint64_t parent_inode,
284 const NameString &name) {
285 assert(false);
286 return false;
287 }
288 void VfsPut(const uint64_t inode, const uint32_t by) { assert(false); }
289 bool Find(const uint64_t inode, PathString *path) { assert(false); return false; }
290
291 // private:
292 static const unsigned kVersion = 1;
293
294 void InitLock() { assert(false); }
295 void CopyFrom(const InodeTracker &other) { assert(false); }
296 inline void Lock() const {
297 // NOT NEEDED
298 }
299 inline void Unlock() const {
300 // NOT NEEDED
301 }
302
303 unsigned version_;
304 pthread_mutex_t *lock_;
305 InodeContainer inode2path_;
306 Statistics statistics_;
307 };
308
309 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker);
310
311 } // namespace inode_tracker
312
313
314 //------------------------------------------------------------------------------
315
316
317 namespace inode_tracker_v2 {
318
319 template<class Key, class Value, class Derived>
320 class SmallHashBase {
321 public:
322 static const double kLoadFactor; // mainly useless for the dynamic version
323 static const double kThresholdGrow; // only used for resizable version
324 static const double kThresholdShrink; // only used for resizable version
325
326 SmallHashBase() { assert(false); }
327 ~SmallHashBase() {
328 delete[] keys_;
329 delete[] values_;
330 }
331 void Init(uint32_t expected_size, Key empty,
332 uint32_t (*hasher)(const Key &key)) {
333 assert(false);
334 }
335 bool Lookup(const Key &key, Value *value) const {
336 uint32_t bucket;
337 uint32_t collisions;
338 const bool found = DoLookup(key, &bucket, &collisions);
339 if (found)
340 *value = values_[bucket];
341 return found;
342 }
343 bool Contains(const Key &key) const {
344 uint32_t bucket;
345 uint32_t collisions;
346 const bool found = DoLookup(key, &bucket, &collisions);
347 return found;
348 }
349 void Insert(const Key &key, const Value &value) { assert(false); }
350 void Erase(const Key &key) { assert(false); }
351 void Clear() { assert(false); }
352 uint64_t bytes_allocated() const { return bytes_allocated_; }
353 static double GetEntrySize() { assert(false); return 0.0; }
354 void GetCollisionStats(uint64_t *num_collisions,
355 uint32_t *max_collisions) const {
356 assert(false);
357 }
358
359 // private:
360 uint32_t ScaleHash(const Key &key) const {
361 double bucket = (double(hasher_(key)) * double(capacity_) / // NOLINT
362 double((uint32_t)(-1))); // NOLINT
363 return (uint32_t)bucket % capacity_;
364 }
365 void InitMemory() { assert(false); }
366 bool DoInsert(const Key &key, const Value &value,
367 const bool count_collisions) {
368 assert(false); return false;
369 }
370 bool DoLookup(const Key &key, uint32_t *bucket, uint32_t *collisions) const {
371 *bucket = ScaleHash(key);
372 *collisions = 0;
373 while (!(keys_[*bucket] == empty_key_)) {
374 if (keys_[*bucket] == key)
375 return true;
376 *bucket = (*bucket + 1) % capacity_;
377 (*collisions)++;
378 }
379 return false;
380 }
381 void DoClear(const bool reset_capacity) { assert(false); }
382 // Methods for resizable version
383 void SetThresholds() { }
384 void Grow() { }
385 void Shrink() { }
386 void ResetCapacity() { }
387
388 // Separate key and value arrays for better locality
389 Key *keys_;
390 Value *values_;
391 uint32_t capacity_;
392 uint32_t initial_capacity_;
393 uint32_t size_;
394 uint32_t (*hasher_)(const Key &key);
395 uint64_t bytes_allocated_;
396 uint64_t num_collisions_;
397 uint32_t max_collisions_; /**< maximum collisions for a single insert */
398 Key empty_key_;
399 };
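// Editor's sketch (not part of the measured file): DoLookup() above is open
// addressing with linear probing -- start at the scaled bucket and step
// forward until the key or an empty slot appears. The same idea over plain
// integers, assuming -1 marks an empty slot:
//
//   bool Probe(const int64_t *keys, uint32_t capacity, uint32_t start,
//              int64_t key) {
//     uint32_t bucket = start % capacity;
//     while (keys[bucket] != -1) {             // stop at first empty slot
//       if (keys[bucket] == key) return true;  // hit after >= 0 collisions
//       bucket = (bucket + 1) % capacity;      // linear step, wraps around
//     }
//     return false;  // an empty slot proves the key was never inserted
//   }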
400
401 template<class Key, class Value>
402 class SmallHashDynamic
403 : public SmallHashBase<Key, Value, SmallHashDynamic<Key, Value> > {
404 friend class SmallHashBase<Key, Value, SmallHashDynamic<Key, Value> >;
405
406 public:
407 typedef SmallHashBase<Key, Value, SmallHashDynamic<Key, Value> > Base;
408 static const double kThresholdGrow;
409 static const double kThresholdShrink;
410
411 SmallHashDynamic() : Base() { assert(false); }
412 explicit SmallHashDynamic(const SmallHashDynamic<Key, Value> &other)
413 : Base() {
414 assert(false);
415 }
416 SmallHashDynamic<Key, Value> &operator=(
417 const SmallHashDynamic<Key, Value> &other) {
418 assert(false); return *this;
419 }
420
421 uint32_t capacity() const { return Base::capacity_; }
422 uint32_t size() const { return Base::size_; }
423 uint32_t num_migrates() const { assert(false); return 0; }
424
425 protected:
426 void SetThresholds() { assert(false); }
427 void Grow() { assert(false); }
428 void Shrink() { assert(false); }
429 void ResetCapacity() { assert(false); }
430
431 private:
432 void Migrate(const uint32_t new_capacity) { assert(false); }
433 void CopyFrom(const SmallHashDynamic<Key, Value> &other) { assert(false); }
434 uint32_t num_migrates_;
435 uint32_t threshold_grow_;
436 uint32_t threshold_shrink_;
437 };
438
439
440 class PathMap {
441 public:
442 PathMap() { assert(false); }
443 bool LookupPath(const shash_v1::Md5 &md5path, PathString *path) {
444 PathInfo value;
445 const bool found = map_.Lookup(md5path, &value);
446 path->Assign(value.path);
447 return found;
448 }
449 uint64_t LookupInode(const PathString &path) {
450 PathInfo value;
451 const bool found = map_.Lookup(
452 shash_v1::Md5(path.GetChars(), path.GetLength()), &value);
453 if (found)
454 return value.inode;
455 return 0;
456 }
457 shash_v1::Md5 Insert(const PathString &path, const uint64_t inode) {
458 assert(false); return shash_v1::Md5();
459 }
460 void Erase(const shash_v1::Md5 &md5path) { assert(false); }
461 void Clear() { assert(false); }
462
463 // private:
464 struct PathInfo {
465 PathInfo() { inode = 0; }
466 PathInfo(const uint64_t i, const PathString &p) {
467 inode = i;
468 path = p;
469 }
470 uint64_t inode;
471 PathString path;
472 };
473 SmallHashDynamic<shash_v1::Md5, PathInfo> map_;
474 };
475
476 class InodeMap {
477 public:
478 InodeMap() { assert(false); }
479 bool LookupMd5Path(const uint64_t inode, shash_v1::Md5 *md5path) {
480 const bool found = map_.Lookup(inode, md5path);
481 return found;
482 }
483 void Insert(const uint64_t inode, const shash_v1::Md5 &md5path) {
484 assert(false);
485 }
486 void Erase(const uint64_t inode) { assert(false); }
487 void Clear() { assert(false); }
488 // private:
489 SmallHashDynamic<uint64_t, shash_v1::Md5> map_;
490 };
491
492
493 class InodeReferences {
494 public:
495 InodeReferences() { assert(false); }
496 bool Get(const uint64_t inode, const uint32_t by) { assert(false); return false; }
497 bool Put(const uint64_t inode, const uint32_t by) { assert(false); return false; }
498 void Clear() { assert(false); }
499 // private:
500 SmallHashDynamic<uint64_t, uint32_t> map_;
501 };
502
503 class InodeTracker {
504 public:
505 struct Statistics {
506 Statistics() { assert(false); }
507 std::string Print() { assert(false); return ""; }
508 atomic_int64 num_inserts;
509 atomic_int64 num_removes;
510 atomic_int64 num_references;
511 atomic_int64 num_hits_inode;
512 atomic_int64 num_hits_path;
513 atomic_int64 num_misses_path;
514 };
515 Statistics GetStatistics() { assert(false); return statistics_; }
516
517 InodeTracker() { assert(false); }
518 explicit InodeTracker(const InodeTracker &other) { assert(false); }
519 InodeTracker &operator=(const InodeTracker &other) { assert(false); return *this; }
520 ~InodeTracker() {
521 pthread_mutex_destroy(lock_);
522 free(lock_);
523 }
524 void VfsGetBy(const uint64_t inode, const uint32_t by,
525 const PathString &path) {
526 assert(false);
527 }
528 void VfsGet(const uint64_t inode, const PathString &path) { assert(false); }
529 void VfsPut(const uint64_t inode, const uint32_t by) { assert(false); }
530 bool FindPath(const uint64_t inode, PathString *path) {
531 // Lock();
532 shash_v1::Md5 md5path;
533 bool found = inode_map_.LookupMd5Path(inode, &md5path);
534 if (found) {
535 found = path_map_.LookupPath(md5path, path);
536 assert(found);
537 }
538 // Unlock();
539 // if (found) atomic_inc64(&statistics_.num_hits_path);
540 // else atomic_inc64(&statistics_.num_misses_path);
541 return found;
542 }
543
544 uint64_t FindInode(const PathString &path) { assert(false); return 0; }
545
546 public:
547 static const unsigned kVersion = 2;
548
549 void InitLock() { assert(false); }
550 void CopyFrom(const InodeTracker &other) { assert(false); }
551 inline void Lock() const { assert(false); }
552 inline void Unlock() const { assert(false); }
553
554 unsigned version_;
555 pthread_mutex_t *lock_;
556 PathMap path_map_;
557 InodeMap inode_map_;
558 InodeReferences inode_references_;
559 Statistics statistics_;
560 };
561
562 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker);
563
564 } // namespace inode_tracker_v2
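// Editor's sketch (not part of the measured file): FindPath() above resolves
// a path in two steps -- inode_map_ yields the md5 hash of the path, then
// path_map_ expands the hash to the full string:
//
//   shash_v1::Md5 md5path;
//   PathString path;
//   if (inode_map_.LookupMd5Path(inode, &md5path))  // inode -> md5(path)
//     path_map_.LookupPath(md5path, &path);         // md5(path) -> "/a/b"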
565
566
567 //------------------------------------------------------------------------------
568
569
570 namespace inode_tracker_v3 {
571
572 class StringRef {
573 public:
574 StringRef() { length_ = NULL; }
575 uint16_t length() const { return *length_; }
576 uint16_t size() const { return sizeof(uint16_t) + *length_; }
577 static uint16_t size(const uint16_t length) {
578 return sizeof(uint16_t) + length;
579 }
580 char *data() const { return reinterpret_cast<char *>(length_ + 1); }
581 static StringRef Place(const uint16_t length, const char *str, void *addr) {
582 assert(false); return StringRef();
583 }
584
585 private:
586 uint16_t *length_;
587 };
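// Editor's note (layout sketch, not part of the measured file): a StringRef
// points into a StringHeap at a length-prefixed record:
//
//   addr:  [ uint16_t length | length bytes of characters ]
//
// data() returns (char *)(length_ + 1), the first byte after the length
// field, and size() is sizeof(uint16_t) + *length_.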
588
589 class StringHeap : public SingleCopy {
590 public:
591 StringHeap() { assert(false); }
592 explicit StringHeap(const uint32_t minimum_size) { assert(false); }
593 void Init(const uint32_t minimum_size) { assert(false); }
594
595 ~StringHeap() {
596 for (unsigned i = 0; i < bins_.size(); ++i) {
597 smunmap(bins_.At(i));
598 }
599 }
600
601 StringRef AddString(const uint16_t length, const char *str) { assert(false); return StringRef(); }
602 void RemoveString(const StringRef str_ref) { assert(false); }
603 double GetUsage() const { assert(false); return 0.0; }
604 uint64_t used() const { assert(false); return 0; }
605
606 private:
607 void AddBin(const uint64_t size) { assert(false); }
608
609 uint64_t size_;
610 uint64_t used_;
611 uint64_t bin_size_;
612 uint64_t bin_used_;
613 BigVector<void *> bins_;
614 };
615
616
617 class PathStore {
618 public:
619 PathStore() { assert(false); }
620 ~PathStore() { delete string_heap_; }
621 explicit PathStore(const PathStore &other) { assert(false); }
622 PathStore &operator=(const PathStore &other) { assert(false); return *this; }
623
624 void Insert(const shash_v1::Md5 &md5path, const PathString &path) {
625 assert(false);
626 }
627
628 bool Lookup(const shash_v1::Md5 &md5path, PathString *path) {
629 PathInfo info;
630 bool retval = map_.Lookup(md5path, &info);
631 if (!retval)
632 return false;
633
634 if (info.parent.IsNull()) {
635 return true;
636 }
637
638 retval = Lookup(info.parent, path);
639 assert(retval);
640 path->Append("/", 1);
641 path->Append(info.name.data(), info.name.length());
642 return true;
643 }
644
645 void Erase(const shash_v1::Md5 &md5path) { assert(false); }
646 void Clear() { assert(false); }
647
648 // private:
649 struct PathInfo {
650 PathInfo() { refcnt = 1; }
651 shash_v1::Md5 parent;
652 uint32_t refcnt;
653 StringRef name;
654 };
655 void CopyFrom(const PathStore &other) { assert(false); }
656 SmallHashDynamic<shash_v1::Md5, PathInfo> map_;
657 StringHeap *string_heap_;
658 };
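// Editor's worked example (not part of the measured file): Lookup() above
// rebuilds an absolute path by recursing along the stored parent hashes and
// appending "/<name>" while unwinding, e.g. for "/a/b":
//
//   Lookup(md5("/a/b")): info = {parent: md5("/a"), name: "b"}
//     Lookup(md5("/a")): info = {parent: md5(""), name: "a"}
//       Lookup(md5("")): root entry, info.parent.IsNull() -> path stays ""
//       append "/a"  -> path == "/a"
//     append "/b"    -> path == "/a/b"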
659
660
661 class PathMap {
662 public:
663 PathMap() { assert(false); }
664 bool LookupPath(const shash_v1::Md5 &md5path, PathString *path) {
665 const bool found = path_store_.Lookup(md5path, path);
666 return found;
667 }
668 uint64_t LookupInode(const PathString &path) { assert(false); return 0; }
669 shash_v1::Md5 Insert(const PathString &path, const uint64_t inode) {
670 assert(false); return shash_v1::Md5();
671 }
672 void Erase(const shash_v1::Md5 &md5path) { assert(false); }
673 void Clear() { assert(false); }
674
675 public:
676 SmallHashDynamic<shash_v1::Md5, uint64_t> map_;
677 PathStore path_store_;
678 };
679
680 class InodeMap {
681 public:
682 InodeMap() { assert(false); }
683 bool LookupMd5Path(const uint64_t inode, shash_v1::Md5 *md5path) {
684 const bool found = map_.Lookup(inode, md5path);
685 return found;
686 }
687 void Insert(const uint64_t inode, const shash_v1::Md5 &md5path) {
688 assert(false);
689 }
690 void Erase(const uint64_t inode) { assert(false); }
691 void Clear() { assert(false); }
692 // private:
693 SmallHashDynamic<uint64_t, shash_v1::Md5> map_;
694 };
695
696
697 class InodeReferences {
698 public:
699 InodeReferences() { assert(false); }
700 bool Get(const uint64_t inode, const uint32_t by) { assert(false); return false; }
701 bool Put(const uint64_t inode, const uint32_t by) { assert(false); return false; }
702 void Clear() { assert(false); }
703 // private:
704 SmallHashDynamic<uint64_t, uint32_t> map_;
705 };
706
707 class InodeTracker {
708 public:
709 struct Statistics {
710 Statistics() { assert(false); }
711 std::string Print() { assert(false); return ""; }
712 atomic_int64 num_inserts;
713 atomic_int64 num_removes;
714 atomic_int64 num_references;
715 atomic_int64 num_hits_inode;
716 atomic_int64 num_hits_path;
717 atomic_int64 num_misses_path;
718 };
719 Statistics GetStatistics() { assert(false); return statistics_; }
720
721 InodeTracker() { assert(false); }
722 explicit InodeTracker(const InodeTracker &other) { assert(false); }
723 InodeTracker &operator=(const InodeTracker &other) { assert(false); return *this; }
724 ~InodeTracker() {
725 pthread_mutex_destroy(lock_);
726 free(lock_);
727 }
728 void VfsGetBy(const uint64_t inode, const uint32_t by,
729 const PathString &path) {
730 assert(false);
731 }
732 void VfsGet(const uint64_t inode, const PathString &path) { assert(false); }
733 void VfsPut(const uint64_t inode, const uint32_t by) { assert(false); }
734 bool FindPath(const uint64_t inode, PathString *path) {
735 // Lock();
736 shash_v1::Md5 md5path;
737 bool found = inode_map_.LookupMd5Path(inode, &md5path);
738 if (found) {
739 found = path_map_.LookupPath(md5path, path);
740 assert(found);
741 }
742 // Unlock();
743 // if (found) atomic_inc64(&statistics_.num_hits_path);
744 // else atomic_inc64(&statistics_.num_misses_path);
745 return found;
746 }
747
748 uint64_t FindInode(const PathString &path) { assert(false); return 0; }
749
750 // private:
751 static const unsigned kVersion = 3;
752
753 void InitLock() { assert(false); }
754 void CopyFrom(const InodeTracker &other) { assert(false); }
755 inline void Lock() const { assert(false); }
756 inline void Unlock() const { assert(false); }
757
758 unsigned version_;
759 pthread_mutex_t *lock_;
760 PathMap path_map_;
761 InodeMap inode_map_;
762 InodeReferences inode_references_;
763 Statistics statistics_;
764 };
765
766 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker);
767
768 } // namespace inode_tracker_v3
769
770
771 //------------------------------------------------------------------------------
772
773
774 namespace chunk_tables {
775
776 class FileChunk {
777 public:
778 FileChunk() { assert(false); }
779 FileChunk(const shash_v1::Any &hash, const off_t offset, const size_t size) {
780 assert(false);
781 }
782 inline const shash_v1::Any &content_hash() const { return content_hash_; }
783 inline off_t offset() const { return offset_; }
784 inline size_t size() const { return size_; }
785
786 // protected:
787 shash_v1::Any content_hash_; //!< content hash of the compressed file chunk
788 off_t offset_; //!< byte offset in the uncompressed input file
789 size_t size_; //!< uncompressed size of the data chunk
790 };
791
792 struct FileChunkReflist {
793 FileChunkReflist() { assert(false); }
794 FileChunkReflist(BigVector<FileChunk> *l, const PathString &p) {
795 assert(false);
796 }
797 BigVector<FileChunk> *list;
798 PathString path;
799 };
800
801 struct ChunkTables {
802 ChunkTables() { assert(false); }
803 ~ChunkTables();
804 ChunkTables(const ChunkTables &other) { assert(false); }
805 ChunkTables &operator=(const ChunkTables &other) { assert(false); return *this; }
806 void CopyFrom(const ChunkTables &other) { assert(false); }
807 void InitLocks() { assert(false); }
808 void InitHashmaps() { assert(false); }
809 pthread_mutex_t *Handle2Lock(const uint64_t handle) const { assert(false); return NULL; }
810 inline void Lock() { assert(false); }
811 inline void Unlock() { assert(false); }
812
813 int version;
814 static const unsigned kNumHandleLocks = 128;
815 SmallHashDynamic<uint64_t, ::ChunkFd> handle2fd;
816 // The file descriptors attached to handles need to be locked.
817 // Using a hash map to survive with a small, fixed number of locks
818 BigVector<pthread_mutex_t *> handle_locks;
819 SmallHashDynamic<uint64_t, FileChunkReflist> inode2chunks;
820 SmallHashDynamic<uint64_t, uint32_t> inode2references;
821 uint64_t next_handle;
822 pthread_mutex_t *lock;
823 };
824
825 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables);
826
827 } // namespace chunk_tables
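// Editor's sketch (assumption -- Handle2Lock()'s body is not shown in this
// header): a scheme consistent with kNumHandleLocks would hash the handle
// into the fixed pool of 128 mutexes, e.g.:
//
//   uint32_t idx = HashUint64(handle) % kNumHandleLocks;  // HashUint64 is
//   return handle_locks.At(idx);                          // hypothetical
//
// Distinct handles may then share a mutex, which costs occasional contention
// but keeps the number of locks small and fixed.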
828
829
830 //------------------------------------------------------------------------------
831
832
833 namespace chunk_tables_v2 {
834
835 class FileChunk {
836 public:
837 FileChunk() { assert(false); }
838 FileChunk(const shash_v2::Any &hash, const off_t offset, const size_t size) {
839 assert(false);
840 }
841 inline const shash_v2::Any &content_hash() const { return content_hash_; }
842 inline off_t offset() const { return offset_; }
843 inline size_t size() const { return size_; }
844
845 // protected:
846 shash_v2::Any content_hash_; //!< content hash of the compressed file chunk
847 off_t offset_; //!< byte offset in the uncompressed input file
848 size_t size_; //!< uncompressed size of the data chunk
849 };
850
851 struct FileChunkReflist {
852 FileChunkReflist() { assert(false); }
853 FileChunkReflist(BigVector<FileChunk> *l, const PathString &p) {
854 assert(false);
855 }
856 BigVector<FileChunk> *list;
857 PathString path;
858 };
859
860 struct ChunkTables {
861 ChunkTables() { assert(false); }
862 ~ChunkTables();
863 ChunkTables(const ChunkTables &other) { assert(false); }
864 ChunkTables &operator=(const ChunkTables &other) { assert(false); return *this; }
865 void CopyFrom(const ChunkTables &other) { assert(false); }
866 void InitLocks() { assert(false); }
867 void InitHashmaps() { assert(false); }
868 pthread_mutex_t *Handle2Lock(const uint64_t handle) const { assert(false); return NULL; }
869 inline void Lock() { assert(false); }
870 inline void Unlock() { assert(false); }
871
872 int version;
873 static const unsigned kNumHandleLocks = 128;
874 SmallHashDynamic<uint64_t, ::ChunkFd> handle2fd;
875 // The file descriptors attached to handles need to be locked.
876 // Using a hash map to survive with a small, fixed number of locks
877 BigVector<pthread_mutex_t *> handle_locks;
878 SmallHashDynamic<uint64_t, FileChunkReflist> inode2chunks;
879 SmallHashDynamic<uint64_t, uint32_t> inode2references;
880 uint64_t next_handle;
881 pthread_mutex_t *lock;
882 };
883
884 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables);
885
886 } // namespace chunk_tables_v2
887
888
889 //------------------------------------------------------------------------------
890
891
892 namespace chunk_tables_v3 {
893
894 struct ChunkTables {
895 ChunkTables() { assert(false); }
896 ~ChunkTables();
897 ChunkTables(const ChunkTables &other) { assert(false); }
898 ChunkTables &operator=(const ChunkTables &other) { assert(false); return *this; }
899 void CopyFrom(const ChunkTables &other) { assert(false); }
900 void InitLocks() { assert(false); }
901 void InitHashmaps() { assert(false); }
902 pthread_mutex_t *Handle2Lock(const uint64_t handle) const { assert(false); return NULL; }
903 inline void Lock() { assert(false); }
904 inline void Unlock() { assert(false); }
905
906 int version;
907 static const unsigned kNumHandleLocks = 128;
908 SmallHashDynamic<uint64_t, ::ChunkFd> handle2fd;
909 // The file descriptors attached to handles need to be locked.
910 // Using a hash map to survive with a small, fixed number of locks
911 BigVector<pthread_mutex_t *> handle_locks;
912 SmallHashDynamic<uint64_t, FileChunkReflist> inode2chunks;
913 SmallHashDynamic<uint64_t, uint32_t> inode2references;
914 uint64_t next_handle;
915 pthread_mutex_t *lock;
916 };
917
918 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables);
919
920 } // namespace chunk_tables_v3
921
922
923 } // namespace compat
924
925 #endif // CVMFS_COMPAT_H_
926