GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/catalog_mgr.h
Date: 2025-06-29 02:35:41
              Exec   Total   Coverage
Lines:         107     141      75.9%
Branches:       47     100      47.0%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 */
4
5 #ifndef CVMFS_CATALOG_MGR_H_
6 #define CVMFS_CATALOG_MGR_H_
7
8 #ifndef __STDC_FORMAT_MACROS
9 #define __STDC_FORMAT_MACROS
10 #endif
11
12 #include <inttypes.h>
13 #include <pthread.h>
14
15 #include <cassert>
16 #include <map>
17 #include <string>
18 #include <vector>
19
20 #include "catalog.h"
21 #include "crypto/hash.h"
22 #include "directory_entry.h"
23 #include "file_chunk.h"
24 #include "manifest_fetch.h"
25 #include "statistics.h"
26 #include "util/algorithm.h"
27 #include "util/atomic.h"
28 #include "util/logging.h"
29 #include "util/platform.h"
30
31 class XattrList;
32 namespace catalog {
33
34 const unsigned kSqliteMemPerThread = 1 * 1024 * 1024;
35
36
37 /**
38 * Lookup options for a directory entry (bitmask).
39 * kLookupDefault = Look solely at the given directory entry (parent is ignored)
40 * kLookupRawSymlink = Don't resolve environment variables in symlink targets
41 */
42 typedef unsigned LookupOptions;
43 const unsigned kLookupDefault = 0b1;
44 const unsigned kLookupRawSymlink = 0b10;
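
A minimal usage sketch of these flags, assuming a manager instance named catalog_mgr and a result entry named dirent (both hypothetical names, not part of this header); the options are plain bit flags and are combined with bitwise OR before being passed to LookupPath():

    // Sketch only: catalog_mgr and dirent are placeholder names.
    catalog::LookupOptions options = catalog::kLookupDefault
                                     | catalog::kLookupRawSymlink;
    catalog::DirectoryEntry dirent;
    bool found = catalog_mgr->LookupPath(std::string("/dir/symlink"),
                                         options, &dirent);
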
45
46 /**
47 * Results upon loading a catalog file.
48 */
49 enum LoadReturn {
50 kLoadNew = 0,
51 kLoadUp2Date,
52 kLoadNoSpace,
53 kLoadFail,
54
55 kLoadNumEntries
56 };
57
58 /**
59 * Location of the most recent root catalog.
60 * Used as part of the process of loading a catalog.
61 * - GetNewRootCatalogContext() sets the location within the CatalogContext object
62 * - LoadCatalogByHash(): when loading a root catalog, it uses the location
63 * stored within the CatalogContext object to retrieve
64 * the root catalog from the right source
65 */
66 enum RootCatalogLocation {
67 kCtlgNoLocationNeeded = 0, // hash known, no location needed
68 kCtlgLocationMounted, // already loaded in mounted_catalogs_
69 kCtlgLocationServer,
70 kCtlgLocationBreadcrumb
71 };
72
73 /**
74 * The CatalogContext class contains all the information necessary to load a
75 * catalog and also keeps track of the resulting output.
76 * It works as follows:
77 * 1) Load a new root catalog:
78 * - Use empty constructor CatalogContext()
79 * - Let the CatalogContext object be populated by GetNewRootCatalogContext()
80 * - This will set: hash, mountpoint, root_ctlg_revision, root_ctlg_location
81 * - Call LoadCatalogByHash()
82 * - This will set: sqlite_path
83 * 2) Load a catalog based on a given hash
84 * - Populate the CatalogContext object; the constructor to use depends on the catalog type
85 * - Root catalog: CatalogContext(shash::Any hash, PathString mountpoint,
86 *   RootCatalogLocation location)
87 * - Nested catalog: CatalogContext(shash::Any hash, PathString mountpoint)
88 *   - Note: in this case root_ctlg_revision is not used
89 * - Call LoadCatalogByHash()
90 *   - This will set: sqlite_path
91 */
92 struct CatalogContext {
93 public:
94 26 CatalogContext()
95 26 : hash_(shash::Any())
96 26 , mountpoint_(PathString("invalid", 7))
97 , // empty str is root ctlg
98
1/2
✓ Branch 2 taken 26 times.
✗ Branch 3 not taken.
26 sqlite_path_("")
99 26 , root_ctlg_revision_(-1ul)
100 26 , root_ctlg_location_(kCtlgNoLocationNeeded)
101
1/2
✓ Branch 1 taken 26 times.
✗ Branch 2 not taken.
26 , manifest_ensemble_(NULL) { }
102 3 CatalogContext(const shash::Any &hash, const PathString &mountpoint)
103 3 : hash_(hash)
104 3 , mountpoint_(mountpoint)
105
1/2
✓ Branch 2 taken 3 times.
✗ Branch 3 not taken.
3 , sqlite_path_("")
106 3 , root_ctlg_revision_(-1ul)
107 3 , root_ctlg_location_(kCtlgNoLocationNeeded)
108
1/2
✓ Branch 1 taken 3 times.
✗ Branch 2 not taken.
3 , manifest_ensemble_(NULL) { }
109
110 1932 CatalogContext(const shash::Any &hash, const PathString &mountpoint,
111 const RootCatalogLocation location)
112 1932 : hash_(hash)
113 1932 , mountpoint_(mountpoint)
114
1/2
✓ Branch 2 taken 1932 times.
✗ Branch 3 not taken.
1932 , sqlite_path_("")
115 1932 , root_ctlg_revision_(-1ul)
116 1932 , root_ctlg_location_(location)
117
1/2
✓ Branch 1 taken 1932 times.
✗ Branch 2 not taken.
1932 , manifest_ensemble_(NULL) { }
118
119 3247 bool IsRootCatalog() { return mountpoint_.IsEmpty(); }
120
121 709 std::string *GetSqlitePathPtr() { return &sqlite_path_; }
122 shash::Any *GetHashPtr() { return &hash_; }
123
124 5575 shash::Any hash() const { return hash_; }
125 3643 PathString mountpoint() const { return mountpoint_; }
126 1780 std::string sqlite_path() const { return sqlite_path_; }
127 uint64_t root_ctlg_revision() const { return root_ctlg_revision_; }
128 655 RootCatalogLocation root_ctlg_location() const { return root_ctlg_location_; }
129 489 manifest::ManifestEnsemble *manifest_ensemble() const {
130 489 return manifest_ensemble_.weak_ref();
131 }
132
133 713 void SetHash(shash::Any hash) { hash_ = hash; }
134 857 void SetMountpoint(const PathString &mountpoint) { mountpoint_ = mountpoint; }
135 824 void SetSqlitePath(const std::string &sqlite_path) {
136 824 sqlite_path_ = sqlite_path;
137 824 }
138 145 void SetRootCtlgRevision(uint64_t root_ctlg_revision) {
139 145 root_ctlg_revision_ = root_ctlg_revision;
140 145 }
141 976 void SetRootCtlgLocation(RootCatalogLocation root_ctlg_location) {
142 976 root_ctlg_location_ = root_ctlg_location;
143 976 }
144 /**
145 * Transfers ownership of the manifest ensemble to this CatalogContext
146 */
147 113 void TakeManifestEnsemble(manifest::ManifestEnsemble *manifest_ensemble) {
148 113 manifest_ensemble_ = manifest_ensemble;
149 113 }
150
151
152 private:
153 // mandatory for LoadCatalogByHash()
154 shash::Any hash_;
155 // mandatory for LoadCatalogByHash()
156 PathString mountpoint_;
157 // out parameter, path name of the sqlite catalog
158 std::string sqlite_path_;
159 // root catalog: revision is needed for GetNewRootCatalogContext()
160 uint64_t root_ctlg_revision_;
161 // root catalog: location is mandatory for LoadCatalogByHash()
162 RootCatalogLocation root_ctlg_location_;
163 // root catalog: if location = server mandatory for LoadCatalogByHash()
164 UniquePtr<manifest::ManifestEnsemble> manifest_ensemble_;
165 };
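
A minimal sketch of the two usage patterns described in the class comment above, assuming it runs inside a member function of a class derived from AbstractCatalogManager (so that the protected GetNewRootCatalogContext() and LoadCatalogByHash() are callable); known_hash is a placeholder value:

    // 1) Load a new root catalog: let the manager pick hash, mountpoint,
    //    revision and location, then fetch it.
    catalog::CatalogContext ctlg_context;
    if (GetNewRootCatalogContext(&ctlg_context) != catalog::kLoadFail) {
      LoadCatalogByHash(&ctlg_context);  // fills in sqlite_path
    }

    // 2) Load a nested catalog whose hash is already known.
    shash::Any known_hash;  // placeholder: normally taken from the parent catalog
    catalog::CatalogContext nested(known_hash, PathString("/nested", 7));
    LoadCatalogByHash(&nested);  // fills in sqlite_path
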
166
167 inline const char *Code2Ascii(const LoadReturn error) {
168 const char *texts[kLoadNumEntries + 1];
169 texts[0] = "loaded new catalog";
170 texts[1] = "catalog was up to date";
171 texts[2] = "not enough space to load catalog";
172 texts[3] = "failed to load catalog";
173 texts[4] = "no text";
174 return texts[error];
175 }
176
177
178 struct Statistics {
179 perf::Counter *n_lookup_inode;
180 perf::Counter *n_lookup_path;
181 perf::Counter *n_lookup_path_negative;
182 perf::Counter *n_lookup_xattrs;
183 perf::Counter *n_listing;
184 perf::Counter *n_nested_listing;
185 perf::Counter *n_detach_siblings;
186 perf::Counter *n_write_lock;
187 perf::Counter *ns_write_lock;
188
189 perf::Counter *catalog_revision;
190
191 1431 explicit Statistics(perf::Statistics *statistics) {
192
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 n_lookup_inode = statistics->Register("catalog_mgr.n_lookup_inode",
193 "Number of inode lookups");
194
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 n_lookup_path = statistics->Register("catalog_mgr.n_lookup_path",
195 "Number of path lookups");
196
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 n_lookup_path_negative = statistics->Register(
197 "catalog_mgr.n_lookup_path_negative",
198 "Number of negative path lookups");
199
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 n_lookup_xattrs = statistics->Register("catalog_mgr.n_lookup_xattrs",
200 "Number of xattrs lookups");
201
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 n_listing = statistics->Register("catalog_mgr.n_listing",
202 "Number of listings");
203
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 n_nested_listing = statistics->Register(
204 "catalog_mgr.n_nested_listing",
205 "Number of listings of nested catalogs");
206
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 n_detach_siblings = statistics->Register(
207 "catalog_mgr.n_detach_siblings",
208 "Number of times the CVMFS_CATALOG_WATERMARK was hit");
209
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 n_write_lock = statistics->Register("catalog_mgr.n_write_lock",
210 "number of write lock calls");
211
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 ns_write_lock = statistics->Register("catalog_mgr.ns_write_lock",
212 "time spent in WriteLock() [ns]");
213
3/6
✓ Branch 2 taken 1431 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 1431 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 1431 times.
✗ Branch 10 not taken.
1431 catalog_revision = statistics->Register(
214 "catalog_revision", "Revision number of the root file catalog");
215 1431 }
216 };
217
218
219 template<class CatalogT>
220 class AbstractCatalogManager;
221
222
223 /**
224 * This class provides the read-only interface to a tree of catalogs
225 * representing a (subtree of a) repository.
226 * It mostly provides lookup functions that fill DirectoryEntry objects.
227 * Reloading of expired catalogs, attaching of nested catalogs, and delegation
228 * of lookups to the appropriate catalog are done transparently.
229 *
230 * The loading / creating of catalogs is up to derived classes.
231 *
232 * CatalogT is either Catalog or MockCatalog.
233 *
234 * Usage:
235 * DerivedCatalogManager *catalog_manager = new DerivedCatalogManager();
236 * catalog_manager->Init();
237 * catalog_manager->LookupPath(<path>, <options>, &<result_entry>);
238 */
239 template<class CatalogT>
240 class AbstractCatalogManager : public SingleCopy {
241 public:
242 typedef std::vector<CatalogT *> CatalogList;
243 typedef CatalogT catalog_t;
244
245 static const inode_t kInodeOffset = 255;
246 explicit AbstractCatalogManager(perf::Statistics *statistics);
247 virtual ~AbstractCatalogManager();
248
249 void SetInodeAnnotation(InodeAnnotation *new_annotation);
250 virtual bool Init();
251 LoadReturn RemountDryrun();
252 LoadReturn Remount();
253 LoadReturn ChangeRoot(const shash::Any &root_hash);
254 void DetachNested();
255
256 bool LookupPath(const PathString &path, const LookupOptions options,
257 DirectoryEntry *entry);
258 461 bool LookupPath(const std::string &path, const LookupOptions options,
259 DirectoryEntry *entry) {
260 461 PathString p;
261
1/2
✓ Branch 3 taken 461 times.
✗ Branch 4 not taken.
461 p.Assign(&path[0], path.length());
262
1/2
✓ Branch 1 taken 461 times.
✗ Branch 2 not taken.
922 return LookupPath(p, options, entry);
263 461 }
264 bool LookupXattrs(const PathString &path, XattrList *xattrs);
265
266 bool LookupNested(const PathString &path,
267 PathString *mountpoint,
268 shash::Any *hash,
269 uint64_t *size);
270 bool ListCatalogSkein(const PathString &path,
271 std::vector<PathString> *result_list);
272
273 bool Listing(const PathString &path, DirectoryEntryList *listing,
274 const bool expand_symlink);
275 203 bool Listing(const PathString &path, DirectoryEntryList *listing) {
276 203 return Listing(path, listing, true);
277 }
278 171 bool Listing(const std::string &path, DirectoryEntryList *listing) {
279 171 PathString p;
280
1/2
✓ Branch 3 taken 171 times.
✗ Branch 4 not taken.
171 p.Assign(&path[0], path.length());
281
1/2
✓ Branch 1 taken 171 times.
✗ Branch 2 not taken.
342 return Listing(p, listing);
282 171 }
283 bool ListingStat(const PathString &path, StatEntryList *listing);
284
285 bool ListFileChunks(const PathString &path,
286 const shash::Algorithms interpret_hashes_as,
287 FileChunkList *chunks);
288 void SetOwnerMaps(const OwnerMap &uid_map, const OwnerMap &gid_map);
289 void SetCatalogWatermark(unsigned limit);
290
291 shash::Any GetNestedCatalogHash(const PathString &mountpoint);
292
293 61 Statistics statistics() const { return statistics_; }
294 uint64_t inode_gauge() {
295 ReadLock();
296 uint64_t r = inode_gauge_;
297 Unlock();
298 return r;
299 }
300 529 bool volatile_flag() const { return volatile_flag_; }
301 uint64_t GetRevision() const;
302 uint64_t GetTimestamp() const;
303 uint64_t GetTTL() const;
304 bool HasExplicitTTL() const;
305 bool GetVOMSAuthz(std::string *authz) const;
306 int GetNumCatalogs() const;
307 std::string PrintHierarchy() const;
308 std::string PrintAllMemStatistics() const;
309
310 /**
311 * Get the inode number of the root DirectoryEntry
312 * ('root' means the root of the whole file system)
313 * @return the root inode number
314 */
315 51 inline inode_t GetRootInode() const {
316
2/2
✓ Branch 0 taken 11 times.
✓ Branch 1 taken 40 times.
51 return inode_annotation_ ? inode_annotation_->Annotate(kInodeOffset + 1)
317 51 : kInodeOffset + 1;
318 }
319 10726 inline CatalogT *GetRootCatalog() const { return catalogs_.front(); }
320 /**
321 * Inodes are ambiguous under some circumstances; to prevent problems
322 * they must be passed through this method first.
323 * @param inode the raw inode
324 * @return the revised inode
325 */
326 inline inode_t MangleInode(const inode_t inode) const {
327 return (inode <= kInodeOffset) ? GetRootInode() : inode;
328 }
329
330 catalog::Counters LookupCounters(const PathString &path,
331 std::string *subcatalog_path,
332 shash::Any *hash);
333
334 protected:
335 /**
336 * Load the catalog and return a file name and the catalog hash.
337 *
338 * GetNewRootCatalogContext() populates CatalogContext object with the
339 * information needed to retrieve the most recent root catalog independent of
340 * its location.
341 * The CatalogContext object must be populated with at least hash and
342 * mountpoint to call LoadCatalogByHash().
343 *
344 * See class description of CatalogContext for more information.
345 */
346 virtual LoadReturn GetNewRootCatalogContext(CatalogContext *result) = 0;
347 virtual LoadReturn LoadCatalogByHash(CatalogContext *ctlg_context) = 0;
348 2161 virtual void UnloadCatalog(const CatalogT *catalog) { }
349 304 virtual void ActivateCatalog(CatalogT *catalog) { }
350 10 const std::vector<CatalogT *> &GetCatalogs() const { return catalogs_; }
351
352 /**
353 * Opportunistic optimization: the client catalog manager uses this method
354 * to preload into the cache a nested catalog that is likely to be required
355 * next. Likely, because there is a race with the root catalog reload which
356 * may result in the wrong catalog being staged. That's not a fault, though:
357 * the correct catalog will still be loaded with the write lock held.
358 * Note that this method is never used for root catalogs.
359 */
360 223 virtual void StageNestedCatalogByHash(const shash::Any & /*hash*/,
361 223 const PathString & /*mountpoint*/) { }
362 /**
363 * Called within the ReadLock(), which will be released before downloading
364 * the catalog (and before leaving the method)
365 */
366 void StageNestedCatalogAndUnlock(const PathString &path,
367 const CatalogT *parent,
368 bool is_listable);
369
370 /**
371 * Create a new Catalog object.
372 * Every derived class has to implement this and return a newly
373 * created (derived) Catalog structure of its desired type.
374 * @param mountpoint the future mountpoint of the catalog to create
375 * @param catalog_hash the content hash of the catalog database
376 * @param parent_catalog the parent of the catalog to create
377 * @return a newly created (derived) Catalog
378 */
379 virtual CatalogT *CreateCatalog(const PathString &mountpoint,
380 const shash::Any &catalog_hash,
381 CatalogT *parent_catalog) = 0;
382
383 CatalogT *MountCatalog(const PathString &mountpoint, const shash::Any &hash,
384 CatalogT *parent_catalog);
385 bool MountSubtree(const PathString &path,
386 const CatalogT *entry_point,
387 bool can_listing,
388 CatalogT **leaf_catalog);
389
390 CatalogT *LoadFreeCatalog(const PathString &mountpoint,
391 const shash::Any &hash);
392
393 bool AttachCatalog(const std::string &db_path, CatalogT *new_catalog);
394 void DetachCatalog(CatalogT *catalog);
395 void DetachSubtree(CatalogT *catalog);
396 void DetachSiblings(const PathString &current_tree);
397 1431 void DetachAll() {
398
2/2
✓ Branch 1 taken 1224 times.
✓ Branch 2 taken 207 times.
1431 if (!catalogs_.empty())
399 1224 DetachSubtree(GetRootCatalog());
400 1431 }
401 bool IsAttached(const PathString &root_path,
402 CatalogT **attached_catalog) const;
403
404 CatalogT *FindCatalog(const PathString &path) const;
405
406 uint64_t GetRevisionNoLock() const;
407 uint64_t GetTimestampNoLock() const;
408 3492 inline void ReadLock() const {
409 3492 const int retval = pthread_rwlock_rdlock(rwlock_);
410
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 3492 times.
3492 assert(retval == 0);
411 3492 }
412 1817 inline void WriteLock() const {
413 1817 const uint64_t timestamp = platform_monotonic_time_ns();
414 1817 const int retval = pthread_rwlock_wrlock(rwlock_);
415
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1817 times.
1817 assert(retval == 0);
416 1817 perf::Inc(statistics_.n_write_lock);
417 1817 const uint64_t duration = platform_monotonic_time_ns() - timestamp;
418 1817 perf::Xadd(statistics_.ns_write_lock, duration);
419 1817 }
420 5309 inline void Unlock() const {
421 5309 const int retval = pthread_rwlock_unlock(rwlock_);
422
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5309 times.
5309 assert(retval == 0);
423 5309 }
424 virtual void EnforceSqliteMemLimit();
425
426 private:
427 void CheckInodeWatermark();
428
429 /**
430 * The flat list of all attached catalogs.
431 */
432 CatalogList catalogs_;
433 int inode_watermark_status_; /**< 0: OK, 1: > 32bit */
434 uint64_t inode_gauge_; /**< highest issued inode */
435 uint64_t revision_cache_;
436 uint64_t timestamp_cache_;
437 /**
438 * Try to keep the number of nested catalogs below the given limit. Zero means no
439 * limit. Surpassing the watermark on mounting a catalog triggers
440 * a DetachSiblings() call.
441 */
442 unsigned catalog_watermark_;
443 /**
444 * Not protected by a read lock because it can only change when the root
445 * catalog is exchanged (during big global lock of the file system).
446 */
447 bool volatile_flag_;
448 /**
449 * Saves the result of GetVOMSAuthz when a root catalog is attached
450 */
451 bool has_authz_cache_;
452 /**
453 * Saves the VOMS requirements when a root catalog is attached
454 */
455 std::string authz_cache_;
456 /**
457 * Counts how often the inodes have been invalidated.
458 */
459 uint64_t incarnation_;
460 // TODO(molina) we could just add an atomic global counter instead
461 InodeAnnotation *inode_annotation_; /**< applied to all catalogs */
462 pthread_rwlock_t *rwlock_;
463 Statistics statistics_;
464 pthread_key_t pkey_sqlitemem_;
465 OwnerMap uid_map_;
466 OwnerMap gid_map_;
467
468 // Not needed anymore since the glue buffers exist
469 // Catalog *Inode2Catalog(const inode_t inode);
470 std::string PrintHierarchyRecursively(const CatalogT *catalog,
471 const int level) const;
472 std::string PrintMemStatsRecursively(const CatalogT *catalog) const;
473
474 InodeRange AcquireInodes(uint64_t size);
475 void ReleaseInodes(const InodeRange chunk);
476 }; // class CatalogManager
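
A hedged sketch of the minimum a derived catalog manager has to supply, namely the three pure virtual methods declared above. The bodies are placeholders; a real implementation would fetch, verify and attach catalogs here:

    class DemoCatalogManager
        : public catalog::AbstractCatalogManager<catalog::Catalog> {
     public:
      explicit DemoCatalogManager(perf::Statistics *statistics)
          : catalog::AbstractCatalogManager<catalog::Catalog>(statistics) { }

     protected:
      virtual catalog::LoadReturn GetNewRootCatalogContext(
          catalog::CatalogContext *result) {
        // Placeholder: decide where the newest root catalog lives and fill in
        // hash, mountpoint, revision and location.
        return catalog::kLoadFail;
      }
      virtual catalog::LoadReturn LoadCatalogByHash(
          catalog::CatalogContext *ctlg_context) {
        // Placeholder: locate/download the catalog file and set its sqlite path.
        return catalog::kLoadFail;
      }
      virtual catalog::Catalog *CreateCatalog(const PathString &mountpoint,
                                              const shash::Any &catalog_hash,
                                              catalog::Catalog *parent_catalog) {
        // Placeholder: a real manager constructs its Catalog type here.
        return NULL;
      }
    };
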
477
478 class InodeGenerationAnnotation : public InodeAnnotation {
479 public:
480 757 InodeGenerationAnnotation() { inode_offset_ = 0; }
481 3006 virtual ~InodeGenerationAnnotation() { }
482 virtual bool ValidInode(const uint64_t inode) {
483 return inode >= inode_offset_;
484 }
485 11 virtual inode_t Annotate(const inode_t raw_inode) {
486 11 return raw_inode + inode_offset_;
487 }
488 virtual inode_t Strip(const inode_t annotated_inode) {
489 return annotated_inode - inode_offset_;
490 }
491 11 virtual void IncGeneration(const uint64_t by) {
492 11 inode_offset_ += by;
493 11 LogCvmfs(kLogCatalog, kLogDebug, "set inode generation to %lu",
494 inode_offset_);
495 11 }
496 308 virtual inode_t GetGeneration() { return inode_offset_; }
497
498 private:
499 uint64_t inode_offset_;
500 };
501
502 /**
503 * In NFS mode, the root inode always has to be 256; otherwise the inode map
504 * lookup fails. In general, the catalog manager inodes in NFS mode are only
505 * used for the chunk tables.
506 */
507 class InodeNfsGenerationAnnotation : public InodeAnnotation {
508 public:
509 InodeNfsGenerationAnnotation() { inode_offset_ = 0; }
510 virtual ~InodeNfsGenerationAnnotation() { }
511 virtual bool ValidInode(const uint64_t inode) {
512 return (inode >= inode_offset_) || (inode == kRootInode);
513 }
514 virtual inode_t Annotate(const inode_t raw_inode) {
515 if (raw_inode <= kRootInode)
516 return kRootInode;
517 return raw_inode + inode_offset_;
518 }
519 virtual inode_t Strip(const inode_t annotated_inode) {
520 if (annotated_inode == kRootInode)
521 return annotated_inode;
522 return annotated_inode - inode_offset_;
523 }
524 virtual void IncGeneration(const uint64_t by) {
525 inode_offset_ += by;
526 LogCvmfs(kLogCatalog, kLogDebug, "set inode generation to %lu",
527 inode_offset_);
528 }
529 virtual inode_t GetGeneration() { return inode_offset_; }
530
531 private:
532 static const uint64_t
533 kRootInode = AbstractCatalogManager<Catalog>::kInodeOffset + 1;
534 uint64_t inode_offset_;
535 };
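
A tiny worked example of the NFS annotation rules above, following from kInodeOffset = 255 and hence kRootInode = 256:

    catalog::InodeNfsGenerationAnnotation nfs_annotation;
    nfs_annotation.IncGeneration(1000);
    assert(nfs_annotation.Annotate(256) == 256);    // root inode stays pinned
    assert(nfs_annotation.Annotate(300) == 1300);   // ordinary inodes are shifted
    assert(nfs_annotation.Strip(1300) == 300);      // Strip() reverses Annotate()
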
536
537 } // namespace catalog
538
539 #include "catalog_mgr_impl.h"
540
541 #endif // CVMFS_CATALOG_MGR_H_
542