GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/compat.h
Date: 2024-04-28 02:33:07
Exec Total Coverage
Lines: 0 97 0.0%
Branches: 0 74 0.0%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 *
4 * A mediator to transform old data structures into new one on reload
5 */
6
7 #ifndef CVMFS_COMPAT_H_
8 #define CVMFS_COMPAT_H_
9
#include <google/sparse_hash_map>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>

#include <cassert>
#include <cstdlib>
#include <cstring>
#include <string>
17
18 #include "bigvector.h"
19 #include "catalog_mgr.h"
20 #include "crypto/hash.h"
21 #include "file_chunk.h"
22 #include "glue_buffer.h"
23 #include "shortstring.h"
24 #include "util/algorithm.h"
25 #include "util/atomic.h"
26
27 namespace compat {
28
29 namespace shash_v1 {
30
/**
 * Hash algorithms of the legacy (v1) hash representation.  The enum values
 * index the parallel lookup tables below, so the order is part of the
 * serialized format and must not change.
 */
enum Algorithms {
  kMd5 = 0,
  kSha1,
  kRmd160,
  kAny,
};
// Digest size in bytes, indexed by Algorithms (kAny reserves the maximum).
const unsigned kDigestSizes[] = {16, 20, 20, 20};
const unsigned kMaxDigestSize = 20;
// Hex suffix per algorithm (e.g. "-rmd160"); array defined in the .cc file.
extern const char *kSuffixes[];
// strlen() of the corresponding entries in kSuffixes.
const unsigned kSuffixLengths[] = {0, 0, 7, 0};
const unsigned kMaxSuffixLength = 7;

/**
 * Legacy fixed-size message digest plus the algorithm that produced it.
 * Kept layout-compatible with the old cvmfs structures so that saved state
 * can be reinterpreted on reload; only what migration needs is present.
 */
template<unsigned digest_size_, Algorithms algorithm_>
struct Digest {
  unsigned char digest[digest_size_];
  Algorithms algorithm;

  unsigned GetDigestSize() const { return kDigestSizes[algorithm]; }
  // Length of ToString(): two hex characters per byte plus the suffix.
  unsigned GetHexSize() const {
    return 2*kDigestSizes[algorithm] + kSuffixLengths[algorithm];
  }

  // All-zero digest of the template's algorithm.
  Digest() {
    algorithm = algorithm_;
    memset(digest, 0, digest_size_);
  }

  // Copies buffer_size bytes; the buffer must fit the template's capacity.
  Digest(const Algorithms a,
         const unsigned char *digest_buffer, const unsigned buffer_size)
  {
    algorithm = a;
    assert(buffer_size <= digest_size_);
    memcpy(digest, digest_buffer, buffer_size);
  }

  /**
   * Lower-case hex representation followed by the algorithm suffix.
   */
  std::string ToString() const {
    const unsigned string_length = GetHexSize();
    std::string result(string_length, 0);

    unsigned i;
    for (i = 0; i < kDigestSizes[algorithm]; ++i) {
      char dgt1 = (unsigned)digest[i] / 16;
      char dgt2 = (unsigned)digest[i] % 16;
      dgt1 += (dgt1 <= 9) ? '0' : 'a' - 10;
      dgt2 += (dgt2 <= 9) ? '0' : 'a' - 10;
      result[i*2] = dgt1;
      result[i*2+1] = dgt2;
    }
    unsigned pos = i*2;
    for (const char *s = kSuffixes[algorithm]; *s != '\0'; ++s) {
      result[pos] = *s;
      pos++;
    }
    return result;
  }

  bool IsNull() const {
    for (unsigned i = 0; i < kDigestSizes[algorithm]; ++i)
      if (digest[i] != 0)
        return false;
    return true;
  }

  bool operator ==(const Digest<digest_size_, algorithm_> &other) const {
    if (this->algorithm != other.algorithm)
      return false;
    for (unsigned i = 0; i < kDigestSizes[algorithm]; ++i)
      if (this->digest[i] != other.digest[i])
        return false;
    return true;
  }

  bool operator !=(const Digest<digest_size_, algorithm_> &other) const {
    return !(*this == other);
  }

  bool operator <(const Digest<digest_size_, algorithm_> &other) const {
    if (this->algorithm != other.algorithm)
      return (this->algorithm < other.algorithm);
    for (unsigned i = 0; i < kDigestSizes[algorithm]; ++i) {
      if (this->digest[i] > other.digest[i])
        return false;
      if (this->digest[i] < other.digest[i])
        return true;
    }
    return false;
  }

  bool operator >(const Digest<digest_size_, algorithm_> &other) const {
    if (this->algorithm != other.algorithm)
      return (this->algorithm > other.algorithm);
    // Fix: loop index was a signed int, causing a signed/unsigned comparison
    // against kDigestSizes[]; use unsigned like all sibling operators.
    for (unsigned i = 0; i < kDigestSizes[algorithm]; ++i) {
      if (this->digest[i] < other.digest[i])
        return false;
      if (this->digest[i] > other.digest[i])
        return true;
    }
    return false;
  }
};
131
/**
 * 128 bit MD5 digest.  The hashing constructor is implemented in the
 * .cc file.
 */
struct Md5 : public Digest<16, kMd5> {
  Md5() : Digest<16, kMd5>() { }
  Md5(const char *chars, const unsigned length);
};
136
// Digest large enough to hold any of the v1 algorithms; used as the generic
// hash value in the migrated data structures below.
struct Any : public Digest<20, kAny> {
  Any() : Digest<20, kAny>() { }
};
140
141 void MigrateAny(const Any *old_hash, shash::Any *new_hash);
142
143 } // namespace shash_v1
144
145
146 //------------------------------------------------------------------------------
147
148
149 namespace shash_v2 {
150
// Algorithm ids of the v2 hash format.  Values index the parallel tables
// below and are part of the serialized layout, so the order is fixed.
enum Algorithms {
  kMd5 = 0,
  kSha1,
  kRmd160,
  kAny,
};
const unsigned kDigestSizes[] = {16, 20, 20, 20};
const unsigned kMaxDigestSize = 20;
// Textual algorithm identifiers (e.g. "-rmd160"); defined in the .cc file.
extern const char *kAlgorithmIds[];
const unsigned kAlgorithmIdSizes[] = {0, 0, 7, 0};
const unsigned kMaxAlgorithmIdentifierSize = 7;
// One-letter object-type tag attached to a hash value.
typedef char Suffix;
const char kSuffixNone = 0;
const char kSuffixCatalog = 'C';
const char kSuffixHistory = 'H';
const char kSuffixMicroCatalog = 'L';  // currently unused
const char kSuffixPartial = 'P';
const char kSuffixTemporary = 'T';
const char kSuffixCertificate = 'X';

/**
 * v2 message digest: raw digest bytes, the producing algorithm, and a
 * one-letter suffix describing the object type.  Layout-compatible with
 * the old structure so saved state can be migrated on reload.
 */
template<unsigned digest_size_, Algorithms algorithm_>
struct Digest {
  unsigned char digest[digest_size_];
  Algorithms algorithm;
  Suffix suffix;

  unsigned GetDigestSize() const { return kDigestSizes[algorithm]; }
  unsigned GetHexSize() const {
    return kAlgorithmIdSizes[algorithm] + 2*kDigestSizes[algorithm];
  }

  // Default: all-zero digest of the template's algorithm, no suffix.
  Digest() : algorithm(algorithm_), suffix(kSuffixNone) {
    for (unsigned i = 0; i < digest_size_; ++i)
      digest[i] = 0;
  }

  // Copies buffer_size bytes into the digest; the buffer must not exceed
  // the template's capacity.
  Digest(const Algorithms a,
         const unsigned char *digest_buffer, const unsigned buffer_size,
         const Suffix s = kSuffixNone)
    : algorithm(a), suffix(s)
  {
    assert(buffer_size <= digest_size_);
    memcpy(digest, digest_buffer, buffer_size);
  }
};
197
// Digest large enough to hold any of the v2 algorithms; carries the object
// type suffix in addition to the digest bytes.
struct Any : public Digest<20, kAny> {
  Any() : Digest<20, kAny>() { }
};
201
202 void MigrateAny(const Any *old_hash, shash::Any *new_hash);
203
204 } // namespace shash_v2
205
206
207 //------------------------------------------------------------------------------
208
209
210 namespace inode_tracker {
211
212 struct Dirent {
213 Dirent() { parent_inode = 0; }
214 Dirent(const uint64_t p, const NameString &n) {
215 parent_inode = p;
216 name = n;
217 references = 1;
218 }
219 uint32_t references;
220 uint64_t parent_inode;
221 NameString name;
222 };
223
224
/**
 * Maps inode numbers to their Dirent (parent inode + name).  Compat shim:
 * instances are only created by reinterpreting saved state, so constructors
 * and all mutating operations abort; only reading members is functional.
 */
class InodeContainer {
 public:
  typedef google::sparse_hash_map<uint64_t, Dirent,
          hash_murmur<uint64_t> >
          InodeMap;

  InodeContainer() {
    assert(false);
  }
  bool Add(const uint64_t inode, const uint64_t parent_inode,
           const NameString &name)
  {
    assert(false); return false;
  }
  bool Get(const uint64_t inode, const uint64_t parent_inode,
           const NameString &name)
  {
    assert(false); return false;
  }
  uint32_t Put(const uint64_t inode, const uint32_t by) {
    assert(false); return false;
  }
  // Rebuilds the full path of an inode by walking parent links (in the .cc).
  bool ConstructPath(const uint64_t inode, PathString *path);
  bool Contains(const uint64_t inode) {
    return map_.find(inode) != map_.end();
  }
  inline size_t Size() { return map_.size(); }
  // private:
  std::string DebugPrint() { assert(false); return ""; }
  InodeMap map_;  // inode --> (parent inode, name, refcount)
};
256
257
258 /**
259 * Tracks inode reference counters as given by Fuse.
260 */
261 class InodeTracker {
262 public:
263 struct Statistics {
264 Statistics() {
265 assert(false);
266 }
267 std::string Print() { assert(false); return ""; }
268 atomic_int64 num_inserts;
269 atomic_int64 num_dangling_try;
270 atomic_int64 num_double_add;
271 atomic_int64 num_removes;
272 atomic_int64 num_references;
273 atomic_int64 num_ancient_hits;
274 atomic_int64 num_ancient_misses;
275 };
276 Statistics GetStatistics() { return statistics_; }
277
278 InodeTracker() { assert(false); }
279 explicit InodeTracker(const InodeTracker &other) { assert(false); }
280 InodeTracker &operator= (const InodeTracker &other) { assert(false); }
281 ~InodeTracker();
282
283 bool VfsGet(const uint64_t inode, const uint64_t parent_inode,
284 const NameString &name)
285 {
286 assert(false); return false;
287 }
288 bool VfsAdd(const uint64_t inode, const uint64_t parent_inode,
289 const NameString &name)
290 {
291 assert(false); return false;
292 }
293 void VfsPut(const uint64_t inode, const uint32_t by) { assert(false); }
294 bool Find(const uint64_t inode, PathString *path) { assert(false); }
295
296 // private:
297 static const unsigned kVersion = 1;
298
299 void InitLock() { assert(false); }
300 void CopyFrom(const InodeTracker &other) { assert(false); }
301 inline void Lock() const {
302 // NOT NEEDED
303 }
304 inline void Unlock() const {
305 // NOT NEEDED
306 }
307
308 unsigned version_;
309 pthread_mutex_t *lock_;
310 InodeContainer inode2path_;
311 Statistics statistics_;
312 };
313
314 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker);
315
316 } // namespace inode_tracker
317
318
319 //------------------------------------------------------------------------------
320
321
322 namespace inode_tracker_v2 {
323
/**
 * Open-addressing hash table with linear probing over separate key and
 * value arrays (CRTP base; Derived supplies the growth policy).  Compat
 * shim: only lookup and destruction are functional, everything that would
 * create or mutate a table aborts.
 */
template<class Key, class Value, class Derived>
class SmallHashBase {
 public:
  static const double kLoadFactor;  // mainly useless for the dynamic version
  static const double kThresholdGrow;  // only used for resizable version
  static const double kThresholdShrink;  // only used for resizable version

  SmallHashBase() { assert(false); }
  ~SmallHashBase() {
    delete[] keys_;
    delete[] values_;
  }
  void Init(uint32_t expected_size, Key empty,
            uint32_t (*hasher)(const Key &key))
  {
    assert(false);
  }
  // Linear-probing lookup; copies the stored value into *value on a hit.
  bool Lookup(const Key &key, Value *value) const {
    uint32_t bucket;
    uint32_t collisions;
    const bool found = DoLookup(key, &bucket, &collisions);
    if (found)
      *value = values_[bucket];
    return found;
  }
  bool Contains(const Key &key) const {
    uint32_t bucket;
    uint32_t collisions;
    const bool found = DoLookup(key, &bucket, &collisions);
    return found;
  }
  void Insert(const Key &key, const Value &value) {
    assert(false);
  }
  void Erase(const Key &key) { assert(false); }
  void Clear() { assert(false); }
  uint64_t bytes_allocated() const { return bytes_allocated_; }
  // Fix: added return value after assert(false); otherwise NDEBUG builds
  // fall off the end of a non-void function (undefined behavior).
  static double GetEntrySize() {
    assert(false);
    return 0.0;
  }
  void GetCollisionStats(uint64_t *num_collisions,
                         uint32_t *max_collisions) const
  {
    assert(false);
  }

  // private:
  // Maps a key's hash value onto a bucket index in [0, capacity_).
  uint32_t ScaleHash(const Key &key) const {
    double bucket = (double(hasher_(key)) * double(capacity_) /  // NOLINT
                     double((uint32_t)(-1)));  // NOLINT
    return (uint32_t)bucket % capacity_;
  }
  void InitMemory() { assert(false); }
  // Fix: added return value (see GetEntrySize above).
  bool DoInsert(const Key &key, const Value &value,
                const bool count_collisions)
  {
    assert(false);
    return false;
  }
  // Probes linearly from the scaled hash until the key or an empty slot
  // is found; reports the probe count in *collisions.
  bool DoLookup(const Key &key, uint32_t *bucket, uint32_t *collisions) const {
    *bucket = ScaleHash(key);
    *collisions = 0;
    while (!(keys_[*bucket] == empty_key_)) {
      if (keys_[*bucket] == key)
        return true;
      *bucket = (*bucket+1) % capacity_;
      (*collisions)++;
    }
    return false;
  }
  void DoClear(const bool reset_capacity) {
    assert(false);
  }
  // Methods for resizable version
  void SetThresholds() { }
  void Grow() { }
  void Shrink() { }
  void ResetCapacity() { }

  // Separate key and value arrays for better locality
  Key *keys_;
  Value *values_;
  uint32_t capacity_;
  uint32_t initial_capacity_;
  uint32_t size_;
  uint32_t (*hasher_)(const Key &key);
  uint64_t bytes_allocated_;
  uint64_t num_collisions_;
  uint32_t max_collisions_;  /**< maximum collisions for a single insert */
  Key empty_key_;
};
414
415 template<class Key, class Value>
416 class SmallHashDynamic :
417 public SmallHashBase< Key, Value, SmallHashDynamic<Key, Value> >
418 {
419 friend class SmallHashBase< Key, Value, SmallHashDynamic<Key, Value> >;
420 public:
421 typedef SmallHashBase< Key, Value, SmallHashDynamic<Key, Value> > Base;
422 static const double kThresholdGrow;
423 static const double kThresholdShrink;
424
425 SmallHashDynamic() : Base() {
426 assert(false);
427 }
428 explicit SmallHashDynamic(const SmallHashDynamic<Key, Value> &other) : Base()
429 {
430 assert(false);
431 }
432 SmallHashDynamic<Key, Value> &operator= (
433 const SmallHashDynamic<Key, Value> &other)
434 {
435 assert(false);
436 }
437
438 uint32_t capacity() const { return Base::capacity_; }
439 uint32_t size() const { return Base::size_; }
440 uint32_t num_migrates() const { assert(false); }
441 protected:
442 void SetThresholds() {
443 assert(false);
444 }
445 void Grow() { assert(false); }
446 void Shrink() { assert(false); }
447 void ResetCapacity() { assert(false); }
448
449 private:
450 void Migrate(const uint32_t new_capacity) {
451 assert(false);
452 }
453 void CopyFrom(const SmallHashDynamic<Key, Value> &other) {
454 assert(false);
455 }
456 uint32_t num_migrates_;
457 uint32_t threshold_grow_;
458 uint32_t threshold_shrink_;
459 };
460
461
462 class PathMap {
463 public:
464 PathMap() {
465 assert(false);
466 }
467 bool LookupPath(const shash_v1::Md5 &md5path, PathString *path) {
468 PathInfo value;
469 bool found = map_.Lookup(md5path, &value);
470 path->Assign(value.path);
471 return found;
472 }
473 uint64_t LookupInode(const PathString &path) {
474 PathInfo value;
475 bool found = map_.Lookup(shash_v1::Md5(path.GetChars(), path.GetLength()),
476 &value);
477 if (found) return value.inode;
478 return 0;
479 }
480 shash_v1::Md5 Insert(const PathString &path, const uint64_t inode) {
481 assert(false);
482 }
483 void Erase(const shash_v1::Md5 &md5path) {
484 assert(false);
485 }
486 void Clear() { assert(false); }
487
488 // private:
489 struct PathInfo {
490 PathInfo() { inode = 0; }
491 PathInfo(const uint64_t i, const PathString &p) { inode = i; path = p; }
492 uint64_t inode;
493 PathString path;
494 };
495 SmallHashDynamic<shash_v1::Md5, PathInfo> map_;
496 };
497
498 class InodeMap {
499 public:
500 InodeMap() {
501 assert(false);
502 }
503 bool LookupMd5Path(const uint64_t inode, shash_v1::Md5 *md5path) {
504 bool found = map_.Lookup(inode, md5path);
505 return found;
506 }
507 void Insert(const uint64_t inode, const shash_v1::Md5 &md5path) {
508 assert(false);
509 }
510 void Erase(const uint64_t inode) {
511 assert(false);
512 }
513 void Clear() { assert(false); }
514 // private:
515 SmallHashDynamic<uint64_t, shash_v1::Md5> map_;
516 };
517
518
519 class InodeReferences {
520 public:
521 InodeReferences() {
522 assert(false);
523 }
524 bool Get(const uint64_t inode, const uint32_t by) {
525 assert(false);
526 }
527 bool Put(const uint64_t inode, const uint32_t by) {
528 assert(false);
529 }
530 void Clear() { assert(false); }
531 // private:
532 SmallHashDynamic<uint64_t, uint32_t> map_;
533 };
534
535 class InodeTracker {
536 public:
537 struct Statistics {
538 Statistics() { assert(false); }
539 std::string Print() { assert(false); }
540 atomic_int64 num_inserts;
541 atomic_int64 num_removes;
542 atomic_int64 num_references;
543 atomic_int64 num_hits_inode;
544 atomic_int64 num_hits_path;
545 atomic_int64 num_misses_path;
546 };
547 Statistics GetStatistics() { assert(false); }
548
549 InodeTracker() { assert(false); }
550 explicit InodeTracker(const InodeTracker &other) { assert(false); }
551 InodeTracker &operator= (const InodeTracker &other) { assert(false); }
552 ~InodeTracker() {
553 pthread_mutex_destroy(lock_);
554 free(lock_);
555 }
556 void VfsGetBy(const uint64_t inode, const uint32_t by, const PathString &path)
557 {
558 assert(false);
559 }
560 void VfsGet(const uint64_t inode, const PathString &path) {
561 assert(false);
562 }
563 void VfsPut(const uint64_t inode, const uint32_t by) {
564 assert(false);
565 }
566 bool FindPath(const uint64_t inode, PathString *path) {
567 // Lock();
568 shash_v1::Md5 md5path;
569 bool found = inode_map_.LookupMd5Path(inode, &md5path);
570 if (found) {
571 found = path_map_.LookupPath(md5path, path);
572 assert(found);
573 }
574 // Unlock();
575 // if (found) atomic_inc64(&statistics_.num_hits_path);
576 // else atomic_inc64(&statistics_.num_misses_path);
577 return found;
578 }
579
580 uint64_t FindInode(const PathString &path) {
581 assert(false);
582 }
583
584 public:
585 static const unsigned kVersion = 2;
586
587 void InitLock() { assert(false); }
588 void CopyFrom(const InodeTracker &other) { assert(false); }
589 inline void Lock() const { assert(false); }
590 inline void Unlock() const { assert(false); }
591
592 unsigned version_;
593 pthread_mutex_t *lock_;
594 PathMap path_map_;
595 InodeMap inode_map_;
596 InodeReferences inode_references_;
597 Statistics statistics_;
598 };
599
600 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker);
601
602 } // namespace inode_tracker_v2
603
604
605 //------------------------------------------------------------------------------
606
607
608 namespace inode_tracker_v3 {
609
/**
 * Pointer into the StringHeap: a 16 bit length field directly followed by
 * the (unterminated) character data.
 */
class StringRef {
 public:
  StringRef() { length_ = NULL; }
  uint16_t length() const { return *length_; }
  // Total footprint in the heap: length field plus the characters.
  uint16_t size() const { return sizeof(uint16_t) + *length_; }
  static uint16_t size(const uint16_t length) {
    return sizeof(uint16_t) + length;
  }
  char *data() const { return reinterpret_cast<char *>(length_ + 1); }
  // Fix: added return value after assert(false); otherwise NDEBUG builds
  // fall off the end of a non-void function (undefined behavior).
  static StringRef Place(const uint16_t length, const char *str,
                         void *addr)
  {
    assert(false);
    return StringRef();
  }
 private:
  uint16_t *length_;
};
627
628 class StringHeap : public SingleCopy {
629 public:
630 StringHeap() { assert(false); }
631 explicit StringHeap(const uint32_t minimum_size) { assert(false); }
632 void Init(const uint32_t minimum_size) { assert(false); }
633
634 ~StringHeap() {
635 for (unsigned i = 0; i < bins_.size(); ++i) {
636 smunmap(bins_.At(i));
637 }
638 }
639
640 StringRef AddString(const uint16_t length, const char *str) {
641 assert(false);
642 }
643 void RemoveString(const StringRef str_ref) { assert(false); }
644 double GetUsage() const { assert(false); }
645 uint64_t used() const { assert(false); }
646
647 private:
648 void AddBin(const uint64_t size) { assert(false); }
649
650 uint64_t size_;
651 uint64_t used_;
652 uint64_t bin_size_;
653 uint64_t bin_used_;
654 BigVector<void *> bins_;
655 };
656
657
658 class PathStore {
659 public:
660 PathStore() { assert(false); }
661 ~PathStore() {
662 delete string_heap_;
663 }
664 explicit PathStore(const PathStore &other) { assert(false); }
665 PathStore &operator= (const PathStore &other) { assert(false); }
666
667 void Insert(const shash_v1::Md5 &md5path, const PathString &path) {
668 assert(false);
669 }
670
671 bool Lookup(const shash_v1::Md5 &md5path, PathString *path) {
672 PathInfo info;
673 bool retval = map_.Lookup(md5path, &info);
674 if (!retval)
675 return false;
676
677 if (info.parent.IsNull()) {
678 return true;
679 }
680
681 retval = Lookup(info.parent, path);
682 assert(retval);
683 path->Append("/", 1);
684 path->Append(info.name.data(), info.name.length());
685 return true;
686 }
687
688 void Erase(const shash_v1::Md5 &md5path) { assert(false); }
689 void Clear() { assert(false); }
690
691 // private:
692 struct PathInfo {
693 PathInfo() {
694 refcnt = 1;
695 }
696 shash_v1::Md5 parent;
697 uint32_t refcnt;
698 StringRef name;
699 };
700 void CopyFrom(const PathStore &other) { assert(false); }
701 SmallHashDynamic<shash_v1::Md5, PathInfo> map_;
702 StringHeap *string_heap_;
703 };
704
705
706 class PathMap {
707 public:
708 PathMap() {
709 assert(false);
710 }
711 bool LookupPath(const shash_v1::Md5 &md5path, PathString *path) {
712 bool found = path_store_.Lookup(md5path, path);
713 return found;
714 }
715 uint64_t LookupInode(const PathString &path) { assert(false); }
716 shash_v1::Md5 Insert(const PathString &path, const uint64_t inode) {
717 assert(false);
718 }
719 void Erase(const shash_v1::Md5 &md5path) {
720 assert(false);
721 }
722 void Clear() { assert(false); }
723 public:
724 SmallHashDynamic<shash_v1::Md5, uint64_t> map_;
725 PathStore path_store_;
726 };
727
728 class InodeMap {
729 public:
730 InodeMap() {
731 assert(false);
732 }
733 bool LookupMd5Path(const uint64_t inode, shash_v1::Md5 *md5path) {
734 bool found = map_.Lookup(inode, md5path);
735 return found;
736 }
737 void Insert(const uint64_t inode, const shash_v1::Md5 &md5path) {
738 assert(false);
739 }
740 void Erase(const uint64_t inode) {
741 assert(false);
742 }
743 void Clear() { assert(false); }
744 // private:
745 SmallHashDynamic<uint64_t, shash_v1::Md5> map_;
746 };
747
748
749 class InodeReferences {
750 public:
751 InodeReferences() {
752 assert(false);
753 }
754 bool Get(const uint64_t inode, const uint32_t by) {
755 assert(false);
756 }
757 bool Put(const uint64_t inode, const uint32_t by) {
758 assert(false);
759 }
760 void Clear() { assert(false); }
761 // private:
762 SmallHashDynamic<uint64_t, uint32_t> map_;
763 };
764
765 class InodeTracker {
766 public:
767 struct Statistics {
768 Statistics() { assert(false); }
769 std::string Print() { assert(false); }
770 atomic_int64 num_inserts;
771 atomic_int64 num_removes;
772 atomic_int64 num_references;
773 atomic_int64 num_hits_inode;
774 atomic_int64 num_hits_path;
775 atomic_int64 num_misses_path;
776 };
777 Statistics GetStatistics() { assert(false); }
778
779 InodeTracker() { assert(false); }
780 explicit InodeTracker(const InodeTracker &other) { assert(false); }
781 InodeTracker &operator= (const InodeTracker &other) { assert(false); }
782 ~InodeTracker() {
783 pthread_mutex_destroy(lock_);
784 free(lock_);
785 }
786 void VfsGetBy(const uint64_t inode, const uint32_t by, const PathString &path)
787 {
788 assert(false);
789 }
790 void VfsGet(const uint64_t inode, const PathString &path) {
791 assert(false);
792 }
793 void VfsPut(const uint64_t inode, const uint32_t by) {
794 assert(false);
795 }
796 bool FindPath(const uint64_t inode, PathString *path) {
797 // Lock();
798 shash_v1::Md5 md5path;
799 bool found = inode_map_.LookupMd5Path(inode, &md5path);
800 if (found) {
801 found = path_map_.LookupPath(md5path, path);
802 assert(found);
803 }
804 // Unlock();
805 // if (found) atomic_inc64(&statistics_.num_hits_path);
806 // else atomic_inc64(&statistics_.num_misses_path);
807 return found;
808 }
809
810 uint64_t FindInode(const PathString &path) {
811 assert(false);
812 }
813
814 // private:
815 static const unsigned kVersion = 3;
816
817 void InitLock() { assert(false); }
818 void CopyFrom(const InodeTracker &other) { assert(false); }
819 inline void Lock() const { assert(false); }
820 inline void Unlock() const { assert(false); }
821
822 unsigned version_;
823 pthread_mutex_t *lock_;
824 PathMap path_map_;
825 InodeMap inode_map_;
826 InodeReferences inode_references_;
827 Statistics statistics_;
828 };
829
830 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker);
831
832 } // namespace inode_tracker_v3
833
834
835 //------------------------------------------------------------------------------
836
837
838 namespace chunk_tables {
839
/**
 * One chunk of a chunked file: the byte range [offset, offset+size) of the
 * uncompressed file, addressed by the content hash of the compressed chunk.
 * Constructors abort; instances only come from reinterpreting saved state.
 */
class FileChunk {
 public:
  FileChunk() { assert(false); }
  FileChunk(const shash_v1::Any &hash, const off_t offset, const size_t size) {
    assert(false);
  }
  inline const shash_v1::Any& content_hash() const { return content_hash_; }
  inline off_t offset() const { return offset_; }
  inline size_t size() const { return size_; }

  // protected:
  shash_v1::Any content_hash_;  //!< content hash of the compressed file chunk
  off_t offset_;  //!< byte offset in the uncompressed input file
  size_t size_;  //!< uncompressed size of the data chunk
};
855
// Chunk list of a file together with the file's path.  The list pointer is
// not owned here; constructors abort (read-only compat shim).
struct FileChunkReflist {
  FileChunkReflist() { assert(false); }
  FileChunkReflist(BigVector<FileChunk> *l, const PathString &p) {
    assert(false);
  }
  BigVector<FileChunk> *list;
  PathString path;
};
864
865 struct ChunkTables {
866 ChunkTables() { assert(false); }
867 ~ChunkTables();
868 ChunkTables(const ChunkTables &other) { assert(false); }
869 ChunkTables &operator= (const ChunkTables &other) { assert(false); }
870 void CopyFrom(const ChunkTables &other) { assert(false); }
871 void InitLocks() { assert(false); }
872 void InitHashmaps() { assert(false); }
873 pthread_mutex_t *Handle2Lock(const uint64_t handle) const { assert(false); }
874 inline void Lock() { assert(false); }
875 inline void Unlock() { assert(false); }
876
877 int version;
878 static const unsigned kNumHandleLocks = 128;
879 SmallHashDynamic<uint64_t, ::ChunkFd> handle2fd;
880 // The file descriptors attached to handles need to be locked.
881 // Using a hash map to survive with a small, fixed number of locks
882 BigVector<pthread_mutex_t *> handle_locks;
883 SmallHashDynamic<uint64_t, FileChunkReflist> inode2chunks;
884 SmallHashDynamic<uint64_t, uint32_t> inode2references;
885 uint64_t next_handle;
886 pthread_mutex_t *lock;
887 };
888
889 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables);
890
891 } // namespace chunk_tables
892
893
894 //------------------------------------------------------------------------------
895
896
897 namespace chunk_tables_v2 {
898
/**
 * One chunk of a chunked file (v2 hashes): the byte range
 * [offset, offset+size) of the uncompressed file, addressed by the content
 * hash of the compressed chunk.  Constructors abort; instances only come
 * from reinterpreting saved state.
 */
class FileChunk {
 public:
  FileChunk() { assert(false); }
  FileChunk(const shash_v2::Any &hash, const off_t offset, const size_t size) {
    assert(false);
  }
  inline const shash_v2::Any& content_hash() const { return content_hash_; }
  inline off_t offset() const { return offset_; }
  inline size_t size() const { return size_; }

  // protected:
  shash_v2::Any content_hash_;  //!< content hash of the compressed file chunk
  off_t offset_;  //!< byte offset in the uncompressed input file
  size_t size_;  //!< uncompressed size of the data chunk
};
914
// Chunk list of a file together with the file's path (v2 chunks).  The list
// pointer is not owned here; constructors abort (read-only compat shim).
struct FileChunkReflist {
  FileChunkReflist() { assert(false); }
  FileChunkReflist(BigVector<FileChunk> *l, const PathString &p) {
    assert(false);
  }
  BigVector<FileChunk> *list;
  PathString path;
};
923
924 struct ChunkTables {
925 ChunkTables() { assert(false); }
926 ~ChunkTables();
927 ChunkTables(const ChunkTables &other) { assert(false); }
928 ChunkTables &operator= (const ChunkTables &other) { assert(false); }
929 void CopyFrom(const ChunkTables &other) { assert(false); }
930 void InitLocks() { assert(false); }
931 void InitHashmaps() { assert(false); }
932 pthread_mutex_t *Handle2Lock(const uint64_t handle) const { assert(false); }
933 inline void Lock() { assert(false); }
934 inline void Unlock() { assert(false); }
935
936 int version;
937 static const unsigned kNumHandleLocks = 128;
938 SmallHashDynamic<uint64_t, ::ChunkFd> handle2fd;
939 // The file descriptors attached to handles need to be locked.
940 // Using a hash map to survive with a small, fixed number of locks
941 BigVector<pthread_mutex_t *> handle_locks;
942 SmallHashDynamic<uint64_t, FileChunkReflist> inode2chunks;
943 SmallHashDynamic<uint64_t, uint32_t> inode2references;
944 uint64_t next_handle;
945 pthread_mutex_t *lock;
946 };
947
948 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables);
949
950 } // namespace chunk_tables_v2
951
952
953 //------------------------------------------------------------------------------
954
955
956 namespace chunk_tables_v3 {
957
958 struct ChunkTables {
959 ChunkTables() { assert(false); }
960 ~ChunkTables();
961 ChunkTables(const ChunkTables &other) { assert(false); }
962 ChunkTables &operator= (const ChunkTables &other) { assert(false); }
963 void CopyFrom(const ChunkTables &other) { assert(false); }
964 void InitLocks() { assert(false); }
965 void InitHashmaps() { assert(false); }
966 pthread_mutex_t *Handle2Lock(const uint64_t handle) const { assert(false); }
967 inline void Lock() { assert(false); }
968 inline void Unlock() { assert(false); }
969
970 int version;
971 static const unsigned kNumHandleLocks = 128;
972 SmallHashDynamic<uint64_t, ::ChunkFd> handle2fd;
973 // The file descriptors attached to handles need to be locked.
974 // Using a hash map to survive with a small, fixed number of locks
975 BigVector<pthread_mutex_t *> handle_locks;
976 SmallHashDynamic<uint64_t, FileChunkReflist> inode2chunks;
977 SmallHashDynamic<uint64_t, uint32_t> inode2references;
978 uint64_t next_handle;
979 pthread_mutex_t *lock;
980 };
981
982 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables);
983
984 } // namespace chunk_tables_v3
985
986
987 } // namespace compat
988
989 #endif // CVMFS_COMPAT_H_
990