GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/catalog_mgr.h
Date: 2024-04-21 02:33:16
Exec Total Coverage
Lines: 103 134 76.9%
Branches: 47 100 47.0%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 */
4
5 #ifndef CVMFS_CATALOG_MGR_H_
6 #define CVMFS_CATALOG_MGR_H_
7
8 #ifndef __STDC_FORMAT_MACROS
9 #define __STDC_FORMAT_MACROS
10 #endif
11
12 #include <inttypes.h>
13 #include <pthread.h>
14
15 #include <cassert>
16 #include <map>
17 #include <string>
18 #include <vector>
19
20 #include "catalog.h"
21 #include "crypto/hash.h"
22 #include "directory_entry.h"
23 #include "file_chunk.h"
24 #include "manifest_fetch.h"
25 #include "statistics.h"
26 #include "util/algorithm.h"
27 #include "util/atomic.h"
28 #include "util/logging.h"
29 #include "util/platform.h"
30
31 class XattrList;
32 namespace catalog {
33
34 const unsigned kSqliteMemPerThread = 1*1024*1024;
35
36
37 /**
38 * LookupOption for a directory entry (bitmask).
39 * kLookupDefault = Look solely at the given directory entry (parent is ignored)
40 * kLookupRawSymlink = Don't resolve environment variables in symlink targets
41 */
42 typedef unsigned LookupOptions;
43 const unsigned kLookupDefault = 0b1;
44 const unsigned kLookupRawSymlink = 0b10;
45
46 /**
47 * Results upon loading a catalog file.
48 */
49 enum LoadReturn {
50 kLoadNew = 0,
51 kLoadUp2Date,
52 kLoadNoSpace,
53 kLoadFail,
54
55 kLoadNumEntries
56 };
57
58 /**
59 * Location of the most recent root catalog.
60 * Used as part of the process of loading a catalog.
61 * - GetNewRootCatalogContext() sets the location within the CatalogContext obj
62 * - LoadCatalogByHash(): when loading a root catalog it uses the location
63 * stored within the CatalogContext object to retrieve
64 * the root catalog from the right location
65 */
66 enum RootCatalogLocation {
67 kCtlgNoLocationNeeded = 0, // hash known, no location needed
68 kCtlgLocationMounted, // already loaded in mounted_catalogs_
69 kCtlgLocationServer,
70 kCtlgLocationBreadcrumb
71 };
72
73 /**
74 * CatalogContext class contains all necessary information to load a catalog and
75 * also keeps track of the resulting output.
76 * It works as follows:
77 * 1) Load a new root catalog:
78 * - Use empty constructor CatalogContext()
79 * - Let the CatalogContext object be populated by GetNewRootCatalogContext()
80 * - This will set: hash, mountpoint, root_ctlg_revision, root_ctlg_location
81 * - Call LoadCatalogByHash()
82 * - This will set: sqlite_path
83 * 2) Load a catalog based on a given hash
84 * - Populate CatalogContext object; used constructor depends on catalog type
85 * - Root catalog: CatalogContext(shash::Any hash, PathString mountpoint,
86 RootCatalogLocation location)
87 - Nested catalog: CatalogContext(shash::Any hash, PathString mountpoint)
88 - Note: in this case root_ctlg_revision is not used
89 * - Call LoadCatalogByHash()
90 - This will set: sqlite_path
91 */
92 struct CatalogContext {
93 public:
94 8 CatalogContext() :
95 8 hash_(shash::Any()),
96 8 mountpoint_(PathString("invalid", 7)), // empty str is root ctlg
97
1/2
✓ Branch 2 taken 8 times.
✗ Branch 3 not taken.
8 sqlite_path_(""),
98 8 root_ctlg_revision_(-1ul),
99 8 root_ctlg_location_(kCtlgNoLocationNeeded),
100
1/2
✓ Branch 1 taken 8 times.
✗ Branch 2 not taken.
8 manifest_ensemble_(NULL) { }
101 3 CatalogContext(const shash::Any &hash, const PathString &mountpoint) :
102 3 hash_(hash),
103 3 mountpoint_(mountpoint),
104
1/2
✓ Branch 2 taken 3 times.
✗ Branch 3 not taken.
3 sqlite_path_(""),
105 3 root_ctlg_revision_(-1ul),
106 3 root_ctlg_location_(kCtlgNoLocationNeeded),
107
1/2
✓ Branch 1 taken 3 times.
✗ Branch 2 not taken.
3 manifest_ensemble_(NULL) { }
108
109 112 CatalogContext(const shash::Any &hash, const PathString &mountpoint,
110 112 const RootCatalogLocation location) :
111 112 hash_(hash),
112 112 mountpoint_(mountpoint),
113
1/2
✓ Branch 2 taken 112 times.
✗ Branch 3 not taken.
112 sqlite_path_(""),
114 112 root_ctlg_revision_(-1ul),
115 112 root_ctlg_location_(location),
116
1/2
✓ Branch 1 taken 112 times.
✗ Branch 2 not taken.
112 manifest_ensemble_(NULL) { }
117
118 178 bool IsRootCatalog() {
119 178 return mountpoint_.IsEmpty();
120 }
121
122 39 std::string *GetSqlitePathPtr() { return &sqlite_path_; }
123 shash::Any *GetHashPtr() { return &hash_; }
124
125 333 shash::Any hash() const { return hash_; }
126 226 PathString mountpoint() const { return mountpoint_; }
127 107 std::string sqlite_path() const { return sqlite_path_; }
128 uint64_t root_ctlg_revision() const { return root_ctlg_revision_; }
129 40 RootCatalogLocation root_ctlg_location() const
130 40 { return root_ctlg_location_; }
131 44 manifest::ManifestEnsemble *manifest_ensemble() const
132 44 { return manifest_ensemble_.weak_ref(); }
133
134 49 void SetHash(shash::Any hash) { hash_ = hash; }
135 54 void SetMountpoint(const PathString &mountpoint) { mountpoint_ = mountpoint; }
136 53 void SetSqlitePath(const std::string &sqlite_path)
137 53 { sqlite_path_ = sqlite_path; }
138 18 void SetRootCtlgRevision(uint64_t root_ctlg_revision)
139 18 { root_ctlg_revision_ = root_ctlg_revision; }
140 73 void SetRootCtlgLocation(RootCatalogLocation root_ctlg_location)
141 73 { root_ctlg_location_ = root_ctlg_location; }
142 /**
143 * Gives ownership to CatalogContext
144 */
145 13 void TakeManifestEnsemble(manifest::ManifestEnsemble *manifest_ensemble)
146 13 { manifest_ensemble_ = manifest_ensemble; }
147
148
149 private:
150 // mandatory for LoadCatalogByHash()
151 shash::Any hash_;
152 // mandatory for LoadCatalogByHash()
153 PathString mountpoint_;
154 // out parameter, path name of the sqlite catalog
155 std::string sqlite_path_;
156 // root catalog: revision is needed for GetNewRootCatalogContext()
157 uint64_t root_ctlg_revision_;
158 // root catalog: location is mandatory for LoadCatalogByHash()
159 RootCatalogLocation root_ctlg_location_;
160 // root catalog: if location = server mandatory for LoadCatalogByHash()
161 UniquePtr<manifest::ManifestEnsemble> manifest_ensemble_;
162 };
163
164 inline const char *Code2Ascii(const LoadReturn error) {
165 const char *texts[kLoadNumEntries + 1];
166 texts[0] = "loaded new catalog";
167 texts[1] = "catalog was up to date";
168 texts[2] = "not enough space to load catalog";
169 texts[3] = "failed to load catalog";
170 texts[4] = "no text";
171 return texts[error];
172 }
173
174
175 struct Statistics {
176 perf::Counter *n_lookup_inode;
177 perf::Counter *n_lookup_path;
178 perf::Counter *n_lookup_path_negative;
179 perf::Counter *n_lookup_xattrs;
180 perf::Counter *n_listing;
181 perf::Counter *n_nested_listing;
182 perf::Counter *n_detach_siblings;
183 perf::Counter *n_write_lock;
184 perf::Counter *ns_write_lock;
185
186 perf::Counter *catalog_revision;
187
188 74 explicit Statistics(perf::Statistics *statistics) {
189
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 n_lookup_inode = statistics->Register("catalog_mgr.n_lookup_inode",
190 "Number of inode lookups");
191
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 n_lookup_path = statistics->Register("catalog_mgr.n_lookup_path",
192 "Number of path lookups");
193
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 n_lookup_path_negative = statistics->Register(
194 "catalog_mgr.n_lookup_path_negative",
195 "Number of negative path lookups");
196
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 n_lookup_xattrs = statistics->Register("catalog_mgr.n_lookup_xattrs",
197 "Number of xattrs lookups");
198
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 n_listing = statistics->Register("catalog_mgr.n_listing",
199 "Number of listings");
200
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 n_nested_listing = statistics->Register("catalog_mgr.n_nested_listing",
201 "Number of listings of nested catalogs");
202
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 n_detach_siblings = statistics->Register("catalog_mgr.n_detach_siblings",
203 "Number of times the CVMFS_CATALOG_WATERMARK was hit");
204
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 n_write_lock = statistics->Register("catalog_mgr.n_write_lock",
205 "number of write lock calls");
206
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 ns_write_lock = statistics->Register("catalog_mgr.ns_write_lock",
207 "time spent in WriteLock() [ns]");
208
3/6
✓ Branch 2 taken 74 times.
✗ Branch 3 not taken.
✓ Branch 6 taken 74 times.
✗ Branch 7 not taken.
✓ Branch 9 taken 74 times.
✗ Branch 10 not taken.
74 catalog_revision = statistics->Register("catalog_revision",
209 "Revision number of the root file catalog");
210 74 }
211 };
212
213
214 template <class CatalogT>
215 class AbstractCatalogManager;
216
217
218 /**
219 * This class provides the read-only interface to a tree of catalogs
220 * representing a (subtree of a) repository.
221 * Mostly lookup functions filling DirectoryEntry objects.
222 * Reloading of expired catalogs, attaching of nested catalogs and delegating
223 * of lookups to the appropriate catalog is done transparently.
224 *
225 * The loading / creating of catalogs is up to derived classes.
226 *
227 * CatalogT is either Catalog or MockCatalog.
228 *
229 * Usage:
230 * DerivedCatalogManager *catalog_manager = new DerivedCatalogManager();
231 * catalog_manager->Init();
232 * catalog_manager->Lookup(<inode>, &<result_entry>);
233 */
234 template <class CatalogT>
235 class AbstractCatalogManager : public SingleCopy {
236 public:
237 typedef std::vector<CatalogT*> CatalogList;
238 typedef CatalogT catalog_t;
239
240 static const inode_t kInodeOffset = 255;
241 explicit AbstractCatalogManager(perf::Statistics *statistics);
242 virtual ~AbstractCatalogManager();
243
244 void SetInodeAnnotation(InodeAnnotation *new_annotation);
245 virtual bool Init();
246 LoadReturn RemountDryrun();
247 LoadReturn Remount();
248 LoadReturn ChangeRoot(const shash::Any &root_hash);
249 void DetachNested();
250
251 bool LookupPath(const PathString &path, const LookupOptions options,
252 DirectoryEntry *entry);
253 31 bool LookupPath(const std::string &path, const LookupOptions options,
254 DirectoryEntry *entry)
255 {
256 31 PathString p;
257
1/2
✓ Branch 3 taken 31 times.
✗ Branch 4 not taken.
31 p.Assign(&path[0], path.length());
258
1/2
✓ Branch 1 taken 31 times.
✗ Branch 2 not taken.
62 return LookupPath(p, options, entry);
259 31 }
260 bool LookupXattrs(const PathString &path, XattrList *xattrs);
261
262 bool LookupNested(const PathString &path,
263 PathString *mountpoint,
264 shash::Any *hash,
265 uint64_t *size);
266 bool ListCatalogSkein(const PathString &path,
267 std::vector<PathString> *result_list);
268
269 bool Listing(const PathString &path, DirectoryEntryList *listing,
270 const bool expand_symlink);
271 37 bool Listing(const PathString &path, DirectoryEntryList *listing) {
272 37 return Listing(path, listing, true);
273 }
274 21 bool Listing(const std::string &path, DirectoryEntryList *listing) {
275 21 PathString p;
276
1/2
✓ Branch 3 taken 21 times.
✗ Branch 4 not taken.
21 p.Assign(&path[0], path.length());
277
1/2
✓ Branch 1 taken 21 times.
✗ Branch 2 not taken.
42 return Listing(p, listing);
278 21 }
279 bool ListingStat(const PathString &path, StatEntryList *listing);
280
281 bool ListFileChunks(const PathString &path,
282 const shash::Algorithms interpret_hashes_as,
283 FileChunkList *chunks);
284 void SetOwnerMaps(const OwnerMap &uid_map, const OwnerMap &gid_map);
285 void SetCatalogWatermark(unsigned limit);
286
287 shash::Any GetNestedCatalogHash(const PathString &mountpoint);
288
289 6 Statistics statistics() const { return statistics_; }
290 uint64_t inode_gauge() {
291 ReadLock(); uint64_t r = inode_gauge_; Unlock(); return r;
292 }
293 24 bool volatile_flag() const { return volatile_flag_; }
294 uint64_t GetRevision() const;
295 uint64_t GetTimestamp() const;
296 uint64_t GetTTL() const;
297 bool HasExplicitTTL() const;
298 bool GetVOMSAuthz(std::string *authz) const;
299 int GetNumCatalogs() const;
300 std::string PrintHierarchy() const;
301 std::string PrintAllMemStatistics() const;
302
303 /**
304 * Get the inode number of the root DirectoryEntry
305 * ('root' means the root of the whole file system)
306 * @return the root inode number
307 */
308 2 inline inode_t GetRootInode() const {
309
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 1 times.
2 return inode_annotation_ ?
310 2 inode_annotation_->Annotate(kInodeOffset + 1) : kInodeOffset + 1;
311 }
312 606 inline CatalogT* GetRootCatalog() const { return catalogs_.front(); }
313 /**
314 * Inodes are ambiguous under some circumstances; to prevent problems
315 * they must be passed through this method first
316 * @param inode the raw inode
317 * @return the revised inode
318 */
319 inline inode_t MangleInode(const inode_t inode) const {
320 return (inode <= kInodeOffset) ? GetRootInode() : inode;
321 }
322
323 catalog::Counters LookupCounters(const PathString &path,
324 std::string *subcatalog_path,
325 shash::Any *hash);
326
327 protected:
328 /**
329 * Load the catalog and return a file name and the catalog hash.
330 *
331 * GetNewRootCatalogContext() populates CatalogContext object with the
332 * information needed to retrieve the most recent root catalog independent of
333 * its location.
334 * The CatalogContext object must be populated with at least hash and
335 * mountpoint to call LoadCatalogByHash().
336 *
337 * See class description of CatalogContext for more information.
338 */
339 virtual LoadReturn GetNewRootCatalogContext(CatalogContext *result) = 0;
340 virtual LoadReturn LoadCatalogByHash(CatalogContext *ctlg_context) = 0;
341 127 virtual void UnloadCatalog(const CatalogT *catalog) { }
342 33 virtual void ActivateCatalog(CatalogT *catalog) { }
343 1 const std::vector<CatalogT*>& GetCatalogs() const { return catalogs_; }
344
345 /**
346 * Opportunistic optimization: the client catalog manager uses this method
347 * to preload into the cache a nested catalog that is likely to be required
348 * next. Likely, because there is a race with the root catalog reload which
349 * may result in the wrong catalog being staged. That's not a fault though,
350 * the correct catalog will still be loaded with the write lock held.
351 * Note that this method is never used for root catalogs.
352 */
353 20 virtual void StageNestedCatalogByHash(const shash::Any & /*hash*/,
354 const PathString & /*mountpoint*/)
355 20 { }
356 /**
357 * Called within the ReadLock(), which will be released before downloading
358 * the catalog (and before leaving the method)
359 */
360 void StageNestedCatalogAndUnlock(const PathString &path,
361 const CatalogT *parent,
362 bool is_listable);
363
364 /**
365 * Create a new Catalog object.
366 * Every derived class has to implement this and return a newly
367 * created (derived) Catalog structure of its desired type.
368 * @param mountpoint the future mountpoint of the catalog to create
369 * @param catalog_hash the content hash of the catalog database
370 * @param parent_catalog the parent of the catalog to create
371 * @return a newly created (derived) Catalog
372 */
373 virtual CatalogT* CreateCatalog(const PathString &mountpoint,
374 const shash::Any &catalog_hash,
375 CatalogT *parent_catalog) = 0;
376
377 CatalogT *MountCatalog(const PathString &mountpoint, const shash::Any &hash,
378 CatalogT *parent_catalog);
379 bool MountSubtree(const PathString &path,
380 const CatalogT *entry_point,
381 bool can_listing,
382 CatalogT **leaf_catalog);
383
384 CatalogT *LoadFreeCatalog(const PathString &mountpoint,
385 const shash::Any &hash);
386
387 bool AttachCatalog(const std::string &db_path, CatalogT *new_catalog);
388 void DetachCatalog(CatalogT *catalog);
389 void DetachSubtree(CatalogT *catalog);
390 void DetachSiblings(const PathString &current_tree);
391
2/2
✓ Branch 1 taken 68 times.
✓ Branch 2 taken 8 times.
76 void DetachAll() { if (!catalogs_.empty()) DetachSubtree(GetRootCatalog()); }
392 bool IsAttached(const PathString &root_path,
393 CatalogT **attached_catalog) const;
394
395 CatalogT *FindCatalog(const PathString &path) const;
396
397 uint64_t GetRevisionNoLock() const;
398 uint64_t GetTimestampNoLock() const;
399 215 inline void ReadLock() const {
400 215 int retval = pthread_rwlock_rdlock(rwlock_);
401
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 215 times.
215 assert(retval == 0);
402 215 }
403 103 inline void WriteLock() const {
404 103 uint64_t timestamp = platform_monotonic_time_ns();
405 103 int retval = pthread_rwlock_wrlock(rwlock_);
406
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 103 times.
103 assert(retval == 0);
407 103 perf::Inc(statistics_.n_write_lock);
408 103 uint64_t duration = platform_monotonic_time_ns() - timestamp;
409 103 perf::Xadd(statistics_.ns_write_lock, duration);
410 103 }
411 318 inline void Unlock() const {
412 318 int retval = pthread_rwlock_unlock(rwlock_);
413
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 318 times.
318 assert(retval == 0);
414 318 }
415 virtual void EnforceSqliteMemLimit();
416
417 private:
418 void CheckInodeWatermark();
419
420 /**
421 * The flat list of all attached catalogs.
422 */
423 CatalogList catalogs_;
424 int inode_watermark_status_; /**< 0: OK, 1: > 32bit */
425 uint64_t inode_gauge_; /**< highest issued inode */
426 uint64_t revision_cache_;
427 uint64_t timestamp_cache_;
428 /**
429 * Try to keep number of nested catalogs below the given limit. Zero means no
430 * limit. Surpassing the watermark on mounting a catalog triggers
431 * a DetachSiblings() call.
432 */
433 unsigned catalog_watermark_;
434 /**
435 * Not protected by a read lock because it can only change when the root
436 * catalog is exchanged (during big global lock of the file system).
437 */
438 bool volatile_flag_;
439 /**
440 * Saves the result of GetVOMSAuthz when a root catalog is attached
441 */
442 bool has_authz_cache_;
443 /**
444 * Saves the VOMS requirements when a root catalog is attached
445 */
446 std::string authz_cache_;
447 /**
448 * Counts how often the inodes have been invalidated.
449 */
450 uint64_t incarnation_;
451 // TODO(molina) we could just add an atomic global counter instead
452 InodeAnnotation *inode_annotation_; /**< applied to all catalogs */
453 pthread_rwlock_t *rwlock_;
454 Statistics statistics_;
455 pthread_key_t pkey_sqlitemem_;
456 OwnerMap uid_map_;
457 OwnerMap gid_map_;
458
459 // Not needed anymore since there are the glue buffers
460 // Catalog *Inode2Catalog(const inode_t inode);
461 std::string PrintHierarchyRecursively(const CatalogT *catalog,
462 const int level) const;
463 std::string PrintMemStatsRecursively(const CatalogT *catalog) const;
464
465 InodeRange AcquireInodes(uint64_t size);
466 void ReleaseInodes(const InodeRange chunk);
467 }; // class CatalogManager
468
469 class InodeGenerationAnnotation : public InodeAnnotation {
470 public:
471 33 InodeGenerationAnnotation() { inode_offset_ = 0; }
472 130 virtual ~InodeGenerationAnnotation() { }
473 virtual bool ValidInode(const uint64_t inode) {
474 return inode >= inode_offset_;
475 }
476 1 virtual inode_t Annotate(const inode_t raw_inode) {
477 1 return raw_inode + inode_offset_;
478 }
479 virtual inode_t Strip(const inode_t annotated_inode) {
480 return annotated_inode - inode_offset_;
481 }
482 1 virtual void IncGeneration(const uint64_t by) {
483 1 inode_offset_ += by;
484 1 LogCvmfs(kLogCatalog, kLogDebug, "set inode generation to %lu",
485 inode_offset_);
486 1 }
487 18 virtual inode_t GetGeneration() { return inode_offset_; }
488
489 private:
490 uint64_t inode_offset_;
491 };
492
493 /**
494 * In NFS mode, the root inode has to be always 256. Otherwise the inode maps
495 * lookup fails. In general, the catalog manager inodes in NFS mode are only
496 * used for the chunk tables.
497 */
498 class InodeNfsGenerationAnnotation : public InodeAnnotation {
499 public:
500 InodeNfsGenerationAnnotation() { inode_offset_ = 0; }
501 virtual ~InodeNfsGenerationAnnotation() { }
502 virtual bool ValidInode(const uint64_t inode) {
503 return (inode >= inode_offset_) || (inode == kRootInode);
504 }
505 virtual inode_t Annotate(const inode_t raw_inode) {
506 if (raw_inode <= kRootInode)
507 return kRootInode;
508 return raw_inode + inode_offset_;
509 }
510 virtual inode_t Strip(const inode_t annotated_inode) {
511 if (annotated_inode == kRootInode)
512 return annotated_inode;
513 return annotated_inode - inode_offset_;
514 }
515 virtual void IncGeneration(const uint64_t by) {
516 inode_offset_ += by;
517 LogCvmfs(kLogCatalog, kLogDebug, "set inode generation to %lu",
518 inode_offset_);
519 }
520 virtual inode_t GetGeneration() { return inode_offset_; }
521
522 private:
523 static const uint64_t kRootInode =
524 AbstractCatalogManager<Catalog>::kInodeOffset + 1;
525 uint64_t inode_offset_;
526 };
527
528 } // namespace catalog
529
530 #include "catalog_mgr_impl.h"
531
532 #endif // CVMFS_CATALOG_MGR_H_
533