CernVM-FS 2.13.0
catalog_mgr_impl.h
/**
 * This file is part of the CernVM File System.
 */

// avoid clang-tidy false positives (at least starting with clang14)
// NOLINTBEGIN

#ifndef CVMFS_CATALOG_MGR_IMPL_H_
#define CVMFS_CATALOG_MGR_IMPL_H_

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif


#include <cassert>
#include <string>
#include <vector>

#include "shortstring.h"
#include "statistics.h"
#include "util/logging.h"
#include "xattr.h"

using namespace std;  // NOLINT

namespace catalog {

template<class CatalogT>
AbstractCatalogManager<CatalogT>::AbstractCatalogManager(
    perf::Statistics *statistics)
    : statistics_(statistics) {
  inode_watermark_status_ = 0;
  revision_cache_ = 0;
  timestamp_cache_ = 0;
  catalog_watermark_ = 0;
  volatile_flag_ = false;
  has_authz_cache_ = false;
  inode_annotation_ = NULL;
  incarnation_ = 0;
  rwlock_ = reinterpret_cast<pthread_rwlock_t *>(
      smalloc(sizeof(pthread_rwlock_t)));
  int retval = pthread_rwlock_init(rwlock_, NULL);
  assert(retval == 0);
  retval = pthread_key_create(&pkey_sqlitemem_, NULL);
  assert(retval == 0);
}

template<class CatalogT>
AbstractCatalogManager<CatalogT>::~AbstractCatalogManager() {
  DetachAll();
  pthread_key_delete(pkey_sqlitemem_);
  pthread_rwlock_destroy(rwlock_);
  free(rwlock_);
}

template<class CatalogT>
void AbstractCatalogManager<CatalogT>::SetInodeAnnotation(
    InodeAnnotation *new_annotation) {
  assert(catalogs_.empty() || (new_annotation == inode_annotation_));
  inode_annotation_ = new_annotation;
}

template<class CatalogT>
void AbstractCatalogManager<CatalogT>::SetOwnerMaps(const OwnerMap &uid_map,
                                                    const OwnerMap &gid_map) {
  uid_map_ = uid_map;
  gid_map_ = gid_map;
}

template<class CatalogT>
void AbstractCatalogManager<CatalogT>::SetCatalogWatermark(unsigned limit) {
  catalog_watermark_ = limit;
}

template<class CatalogT>
void AbstractCatalogManager<CatalogT>::CheckInodeWatermark() {
  if (inode_watermark_status_ > 0)
    return;

  uint64_t highest_inode = inode_gauge_;
  if (inode_annotation_)
    highest_inode += inode_annotation_->GetGeneration();
  uint64_t uint32_border = 1;
  uint32_border = uint32_border << 32;
  if (highest_inode >= uint32_border) {
    LogCvmfs(kLogCatalog, kLogDebug | kLogSyslogWarn, "inodes exceed 32bit");
    inode_watermark_status_++;
  }
}


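// Initializes the catalog manager by mounting the root catalog under the
// write lock; returns false if no root catalog could be attached.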
template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::Init() {
  LogCvmfs(kLogCatalog, kLogDebug, "Initialize catalog");
  WriteLock();
  bool attached = MountCatalog(PathString("", 0), shash::Any(), NULL);
  Unlock();

  if (!attached) {
    LogCvmfs(kLogCatalog, kLogDebug, "failed to initialize root catalog");
  }

  return attached;
}


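// Dry run of a remount: only determines whether a new root catalog is
// available; no catalog is attached.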
template<class CatalogT>
LoadReturn AbstractCatalogManager<CatalogT>::RemountDryrun() {
  LogCvmfs(kLogCatalog, kLogDebug, "dryrun remounting repositories");
  CatalogContext ctlg_context;
  return GetNewRootCatalogContext(&ctlg_context);
}

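// Remounts the repository: if a new root catalog is available, all attached
// catalogs are detached and the new root catalog takes their place. The
// inode generation is increased by the number of inodes used so far.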
template<class CatalogT>
LoadReturn AbstractCatalogManager<CatalogT>::Remount() {
  LogCvmfs(kLogCatalog, kLogDebug, "remounting repositories");
  CatalogContext ctlg_context;

  if (GetNewRootCatalogContext(&ctlg_context) != kLoadNew
      && GetNewRootCatalogContext(&ctlg_context) != kLoadUp2Date) {
    LogCvmfs(kLogCatalog, kLogDebug,
             "remounting repositories: "
             "Did not find any valid root catalog to mount");
    return kLoadFail;
  }

  WriteLock();

  const LoadReturn load_error = LoadCatalogByHash(&ctlg_context);

  if (load_error == kLoadNew) {
    inode_t old_inode_gauge = inode_gauge_;
    DetachAll();

    CatalogT *new_root = CreateCatalog(ctlg_context.mountpoint(),
                                       ctlg_context.hash(), NULL);
    assert(new_root);
    bool retval = AttachCatalog(ctlg_context.sqlite_path(), new_root);
    assert(retval);

    if (inode_annotation_) {
      inode_annotation_->IncGeneration(old_inode_gauge);
    }
  }
  CheckInodeWatermark();
  Unlock();

  return load_error;
}

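// Switches the mounted catalog tree to the given root hash: the catalog
// identified by root_hash replaces the current root and all previously
// attached catalogs are detached.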
template<class CatalogT>
LoadReturn AbstractCatalogManager<CatalogT>::ChangeRoot(
    const shash::Any &root_hash) {
  assert(!root_hash.IsNull());
  LogCvmfs(kLogCatalog, kLogDebug, "switching to root hash %s",
           root_hash.ToString().c_str());

  WriteLock();

  CatalogContext ctlg_context(root_hash, PathString("", 0),
                              kCtlgLocationMounted);
  // we do not need to set revision as LoadCatalogByHash
  // needs only mountpoint, hash

  const LoadReturn load_error = LoadCatalogByHash(&ctlg_context);

  if (load_error == kLoadNew) {
    inode_t old_inode_gauge = inode_gauge_;
    DetachAll();

    CatalogT *new_root = CreateCatalog(PathString("", 0), ctlg_context.hash(),
                                       NULL);
    assert(new_root);
    bool retval = AttachCatalog(ctlg_context.sqlite_path(), new_root);
    assert(retval);

    if (inode_annotation_) {
      inode_annotation_->IncGeneration(old_inode_gauge);
    }
  }
  CheckInodeWatermark();
  Unlock();

  return load_error;
}


template<class CatalogT>
void AbstractCatalogManager<CatalogT>::DetachNested() {
  WriteLock();
  if (catalogs_.empty()) {
    Unlock();
    return;
  }

  typename CatalogList::const_iterator i;
  typename CatalogList::const_iterator iend;
  CatalogList catalogs_to_detach = GetRootCatalog()->GetChildren();
  for (i = catalogs_to_detach.begin(), iend = catalogs_to_detach.end();
       i != iend; ++i) {
    DetachSubtree(*i);
  }

  Unlock();
}


template<class CatalogT>
shash::Any AbstractCatalogManager<CatalogT>::GetNestedCatalogHash(
    const PathString &mountpoint) {
  assert(!mountpoint.IsEmpty());
  CatalogT *catalog = FindCatalog(mountpoint);
  assert(catalog != NULL);
  if (catalog->mountpoint() == mountpoint) {
    catalog = catalog->parent();
    assert(catalog != NULL);
  }
  shash::Any result;
  uint64_t size;
  catalog->FindNested(mountpoint, &result, &size);
  return result;
}


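// Looks up a directory entry by path. The lookup starts in the deepest
// already attached catalog that fits the path; if the entry lives in a
// not-yet-attached nested catalog, the nested catalog is mounted on demand
// and the lookup is retried. Returns false if the entry does not exist or a
// required nested catalog failed to load.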
template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::LookupPath(const PathString &path,
                                                  const LookupOptions options,
                                                  DirectoryEntry *dirent) {
  // initialize as non-negative
  assert(dirent);
  *dirent = DirectoryEntry();

  // create a dummy negative directory entry
  const DirectoryEntry dirent_negative = DirectoryEntry(
      kDirentNegative);

  EnforceSqliteMemLimit();
  ReadLock();

  CatalogT *best_fit = FindCatalog(path);
  assert(best_fit != NULL);

  perf::Inc(statistics_.n_lookup_path);
  LogCvmfs(kLogCatalog, kLogDebug, "looking up '%s' in catalog: '%s'",
           path.c_str(), best_fit->mountpoint().c_str());
  bool found = best_fit->LookupPath(path, dirent);

  // Possibly in a nested catalog
  if (!found && MountSubtree(path, best_fit, false /* is_listable */, NULL)) {
    LogCvmfs(kLogCatalog, kLogDebug, "looking up '%s' in a nested catalog",
             path.c_str());
    StageNestedCatalogAndUnlock(path, best_fit, false /* is_listable */);
    WriteLock();
    // Check again to avoid race
    best_fit = FindCatalog(path);
    assert(best_fit != NULL);
    perf::Inc(statistics_.n_lookup_path);
    found = best_fit->LookupPath(path, dirent);

    if (!found) {
      LogCvmfs(kLogCatalog, kLogDebug,
               "entry not found, we may have to load nested catalogs");

      CatalogT *nested_catalog;
      found = MountSubtree(path, best_fit, false /* is_listable */,
                           &nested_catalog);

      if (!found) {
        LogCvmfs(kLogCatalog, kLogDebug,
                 "failed to load nested catalog for '%s'", path.c_str());
        goto lookup_path_notfound;
      }

      if (nested_catalog != best_fit) {
        perf::Inc(statistics_.n_lookup_path);
        found = nested_catalog->LookupPath(path, dirent);
        if (!found) {
          LogCvmfs(kLogCatalog, kLogDebug,
                   "nested catalogs loaded but entry '%s' was still not found",
                   path.c_str());
          if (dirent != NULL)
            *dirent = dirent_negative;
          goto lookup_path_notfound;
        } else {
          best_fit = nested_catalog;
        }
      } else {
        LogCvmfs(kLogCatalog, kLogDebug, "no nested catalog fits");
        if (dirent != NULL)
          *dirent = dirent_negative;
        goto lookup_path_notfound;
      }
    }
    assert(found);
  }
  // Not in a nested catalog (because no nested catalog fits), ENOENT
  if (!found) {
    LogCvmfs(kLogCatalog, kLogDebug, "ENOENT: '%s'", path.c_str());
    if (dirent != NULL)
      *dirent = dirent_negative;
    goto lookup_path_notfound;
  }

  LogCvmfs(kLogCatalog, kLogDebug, "found entry '%s' in catalog '%s'",
           path.c_str(), best_fit->mountpoint().c_str());

  if ((options & kLookupRawSymlink) == kLookupRawSymlink) {
    LinkString raw_symlink;
    bool retval = best_fit->LookupRawSymlink(path, &raw_symlink);
    assert(retval);  // Must be true, we have just found the entry
    dirent->set_symlink(raw_symlink);
  }

  Unlock();
  return true;

lookup_path_notfound:
  Unlock();
  // Includes both: ENOENT and not found due to I/O error
  perf::Inc(statistics_.n_lookup_path_negative);
  return false;
}


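// Looks up the nested catalog that serves the given path and returns its
// mountpoint, hash and size. Falls back to the root catalog (with size 0)
// if no nested catalog covers the path.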
template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::LookupNested(const PathString &path,
                                                    PathString *mountpoint,
                                                    shash::Any *hash,
                                                    uint64_t *size) {
  EnforceSqliteMemLimit();
  bool result = false;
  ReadLock();

  // Look past current path to mount up to intended location
  PathString catalog_path(path);
  catalog_path.Append("/.cvmfscatalog", 14);

  // Find catalog, possibly load nested
  CatalogT *best_fit = FindCatalog(catalog_path);
  CatalogT *catalog = best_fit;
  if (MountSubtree(catalog_path, best_fit, false /* is_listable */, NULL)) {
    StageNestedCatalogAndUnlock(path, best_fit, false);
    WriteLock();
    // Check again to avoid race
    best_fit = FindCatalog(catalog_path);
    result = MountSubtree(catalog_path, best_fit, false /* is_listable */,
                          &catalog);
    // Result is false if an available catalog failed to load (error happened)
    if (!result) {
      Unlock();
      return false;
    }
  }

  // If the found catalog is the Root there is no parent to lookup
  if (catalog->HasParent()) {
    result = catalog->parent()->FindNested(catalog->root_prefix(), hash, size);
  }

  // Mountpoint now points to the found catalog
  mountpoint->Assign(catalog->root_prefix());

  // If the result is false, it means that no nested catalog was found for
  // this path. As the root catalog does not have a Nested Catalog of
  // itself, we manually set the values and leave the size as 0.
  // TODO(nhazekam) Allow for Root Catalog to be returned
  if (!result) {
    *hash = GetRootCatalog()->hash();
    *size = 0;
    result = true;
  }

  Unlock();
  return result;
}


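// Collects the mountpoints of all catalogs along the path: the chain of
// parent catalogs from the root down to the catalog serving the path, plus
// that catalog's own nested catalogs.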
template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::ListCatalogSkein(
    const PathString &path, std::vector<PathString> *result_list) {
  EnforceSqliteMemLimit();
  bool result;
  ReadLock();

  // Look past current path to mount up to intended location
  PathString test(path);
  test.Append("/.cvmfscatalog", 14);

  // Find catalog, possibly load nested
  CatalogT *best_fit = FindCatalog(test);
  CatalogT *catalog = best_fit;
  // True if there is an available nested catalog
  if (MountSubtree(test, best_fit, false /* is_listable */, NULL)) {
    StageNestedCatalogAndUnlock(path, best_fit, false);
    WriteLock();
    // Check again to avoid race
    best_fit = FindCatalog(test);
    result = MountSubtree(test, best_fit, false /* is_listable */, &catalog);
    // result is false if an available catalog failed to load
    if (!result) {
      Unlock();
      return false;
    }
  }

  // Build listing
  CatalogT *cur_parent = catalog->parent();
  if (cur_parent) {
    // Walk up parent tree to find base
    std::vector<catalog::Catalog *> parents;
    while (cur_parent->HasParent()) {
      parents.push_back(cur_parent);
      cur_parent = cur_parent->parent();
    }
    parents.push_back(cur_parent);
    while (!parents.empty()) {
      // Add to list in order starting at root
      result_list->push_back(parents.back()->root_prefix());
      parents.pop_back();
    }
  }
  // Add the current catalog
  result_list->push_back(catalog->root_prefix());

  Catalog::NestedCatalogList children = catalog->ListOwnNestedCatalogs();

  // Add all children nested catalogs
  for (unsigned i = 0; i < children.size(); i++) {
    result_list->push_back(children.at(i).mountpoint);
  }

  Unlock();
  return true;
}


template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::LookupXattrs(const PathString &path,
                                                    XattrList *xattrs) {
  EnforceSqliteMemLimit();
  bool result;
  ReadLock();

  // Find catalog, possibly load nested
  CatalogT *best_fit = FindCatalog(path);
  CatalogT *catalog = best_fit;
  if (MountSubtree(path, best_fit, false /* is_listable */, NULL)) {
    StageNestedCatalogAndUnlock(path, best_fit, false);
    WriteLock();
    // Check again to avoid race
    best_fit = FindCatalog(path);
    result = MountSubtree(path, best_fit, false /* is_listable */, &catalog);
    if (!result) {
      Unlock();
      return false;
    }
  }

  perf::Inc(statistics_.n_lookup_xattrs);
  result = catalog->LookupXattrsPath(path, xattrs);

  Unlock();
  return result;
}


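// Returns the directory listing of path, mounting the responsible nested
// catalog on demand.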
template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::Listing(const PathString &path,
                                               DirectoryEntryList *listing,
                                               const bool expand_symlink) {
  EnforceSqliteMemLimit();
  bool result;
  ReadLock();

  // Find catalog, possibly load nested
  CatalogT *best_fit = FindCatalog(path);
  CatalogT *catalog = best_fit;
  if (MountSubtree(path, best_fit, true /* is_listable */, NULL)) {
    StageNestedCatalogAndUnlock(path, best_fit, true /* is_listable */);
    WriteLock();
    // Check again to avoid race
    best_fit = FindCatalog(path);
    result = MountSubtree(path, best_fit, true /* is_listable */, &catalog);
    if (!result) {
      Unlock();
      return false;
    }
  }

  perf::Inc(statistics_.n_listing);
  result = catalog->ListingPath(path, listing, expand_symlink);

  Unlock();
  return result;
}


template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::ListingStat(const PathString &path,
                                                   StatEntryList *listing) {
  EnforceSqliteMemLimit();
  bool result;
  ReadLock();

  // Find catalog, possibly load nested
  CatalogT *best_fit = FindCatalog(path);
  CatalogT *catalog = best_fit;
  if (MountSubtree(path, best_fit, true /* is_listable */, NULL)) {
    StageNestedCatalogAndUnlock(path, best_fit, true /* is_listable */);
    WriteLock();
    // Check again to avoid race
    best_fit = FindCatalog(path);
    result = MountSubtree(path, best_fit, true /* is_listable */, &catalog);
    if (!result) {
      Unlock();
      return false;
    }
  }

  perf::Inc(statistics_.n_listing);
  result = catalog->ListingPathStat(path, listing);

  Unlock();
  return result;
}


template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::ListFileChunks(
    const PathString &path,
    const shash::Algorithms interpret_hashes_as,
    FileChunkList *chunks) {
  EnforceSqliteMemLimit();
  bool result;
  ReadLock();

  // Find catalog, possibly load nested
  CatalogT *best_fit = FindCatalog(path);
  CatalogT *catalog = best_fit;
  if (MountSubtree(path, best_fit, false /* is_listable */, NULL)) {
    StageNestedCatalogAndUnlock(path, best_fit, false);
    WriteLock();
    // Check again to avoid race
    best_fit = FindCatalog(path);
    result = MountSubtree(path, best_fit, false /* is_listable */, &catalog);
    if (!result) {
      Unlock();
      return false;
    }
  }

  result = catalog->ListPathChunks(path, interpret_hashes_as, chunks);

  Unlock();
  return result;
}

template<class CatalogT>
catalog::Counters AbstractCatalogManager<CatalogT>::LookupCounters(
    const PathString &path, std::string *subcatalog_path, shash::Any *hash) {
  EnforceSqliteMemLimit();
  bool result;
  ReadLock();

  // Look past current path to mount up to intended location
  PathString catalog_path(path);
  catalog_path.Append("/.cvmfscatalog", 14);

  // Find catalog, possibly load nested
  CatalogT *best_fit = FindCatalog(catalog_path);
  CatalogT *catalog = best_fit;
  if (MountSubtree(catalog_path, best_fit, false /* is_listable */, NULL)) {
    StageNestedCatalogAndUnlock(path, best_fit, false /* is_listable */);
    WriteLock();
    // Check again to avoid race
    best_fit = FindCatalog(catalog_path);
    result = MountSubtree(catalog_path, best_fit, false /* is_listable */,
                          &catalog);
    // Result is false if an available catalog failed to load (error happened)
    if (!result) {
      Unlock();
      *subcatalog_path = "error: failed to load catalog!";
      *hash = shash::Any();
      return catalog::Counters();
    }
  }

  *hash = catalog->hash();
  *subcatalog_path = catalog->mountpoint().ToString();
  catalog::Counters counters = catalog->GetCounters();
  Unlock();
  return counters;
}


template<class CatalogT>
uint64_t AbstractCatalogManager<CatalogT>::GetRevision() const {
  ReadLock();
  const uint64_t revision = GetRevisionNoLock();
  Unlock();

  return revision;
}

// Like GetRevision() but without taking the read lock; the caller must hold
// the lock already.
template<class CatalogT>
uint64_t AbstractCatalogManager<CatalogT>::GetRevisionNoLock() const {
  return revision_cache_;
}

template<class CatalogT>
uint64_t AbstractCatalogManager<CatalogT>::GetTimestamp() const {
  ReadLock();
  const uint64_t timestamp = GetTimestampNoLock();
  Unlock();

  return timestamp;
}

// Like GetTimestamp() but without taking the read lock; the caller must hold
// the lock already.
template<class CatalogT>
uint64_t AbstractCatalogManager<CatalogT>::GetTimestampNoLock() const {
  return timestamp_cache_;
}

template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::GetVOMSAuthz(std::string *authz) const {
  ReadLock();
  const bool has_authz = has_authz_cache_;
  if (has_authz && authz)
    *authz = authz_cache_;
  Unlock();
  return has_authz;
}


template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::HasExplicitTTL() const {
  ReadLock();
  const bool result = GetRootCatalog()->HasExplicitTTL();
  Unlock();
  return result;
}


template<class CatalogT>
uint64_t AbstractCatalogManager<CatalogT>::GetTTL() const {
  ReadLock();
  const uint64_t ttl = GetRootCatalog()->GetTTL();
  Unlock();
  return ttl;
}


template<class CatalogT>
int AbstractCatalogManager<CatalogT>::GetNumCatalogs() const {
  ReadLock();
  int result = catalogs_.size();
  Unlock();
  return result;
}


// Returns the catalog hierarchy as indented text, one catalog per line.
template<class CatalogT>
std::string AbstractCatalogManager<CatalogT>::PrintHierarchy() const {
  ReadLock();
  string output = PrintHierarchyRecursively(GetRootCatalog(), 0);
  Unlock();
  return output;
}


// Allocates a chunk of inode numbers for a newly attached catalog.
template<class CatalogT>
InodeRange AbstractCatalogManager<CatalogT>::AcquireInodes(uint64_t size) {
  InodeRange result;
  result.offset = inode_gauge_;
  result.size = size;

  inode_gauge_ += size;
  LogCvmfs(kLogCatalog, kLogDebug, "allocating inodes from %lu to %lu.",
           result.offset + 1, inode_gauge_);

  return result;
}


// Intended to release the inode chunk of a detached catalog (see TODO).
template<class CatalogT>
void AbstractCatalogManager<CatalogT>::ReleaseInodes(const InodeRange chunk) {
  // TODO(jblomer) currently inodes are only released on remount
}


// Finds the deepest already attached catalog whose mountpoint is a prefix
// of the given path.
template<class CatalogT>
CatalogT *AbstractCatalogManager<CatalogT>::FindCatalog(
    const PathString &path) const {
  assert(catalogs_.size() > 0);

  // Start at the root catalog and successively go down the catalog tree
  CatalogT *best_fit = GetRootCatalog();
  CatalogT *next_fit = NULL;
  while (best_fit->mountpoint() != path) {
    next_fit = best_fit->FindSubtree(path);
    if (next_fit == NULL)
      break;
    best_fit = next_fit;
  }

  return best_fit;
}


template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::IsAttached(
    const PathString &root_path, CatalogT **attached_catalog) const {
  if (catalogs_.size() == 0)
    return false;

  CatalogT *best_fit = FindCatalog(root_path);
  if (best_fit->mountpoint() != root_path)
    return false;

  if (attached_catalog != NULL)
    *attached_catalog = best_fit;
  return true;
}


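// Stages (pre-fetches) the nested catalog that would serve the given path
// via StageNestedCatalogByHash(). The held lock is released in every code
// path; callers re-acquire the write lock and re-check the catalog tree
// afterwards.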
template<class CatalogT>
void AbstractCatalogManager<CatalogT>::StageNestedCatalogAndUnlock(
    const PathString &path, const CatalogT *parent, bool is_listable) {
  assert(parent);
  const unsigned path_len = path.GetLength();

  perf::Inc(statistics_.n_nested_listing);
  typedef typename CatalogT::NestedCatalogList NestedCatalogList;
  const NestedCatalogList &nested_catalogs = parent->ListNestedCatalogs();

  for (typename NestedCatalogList::const_iterator i = nested_catalogs.begin(),
                                                  iEnd = nested_catalogs.end();
       i != iEnd;
       ++i) {
    if (!path.StartsWith(i->mountpoint))
      continue;

    // in this case the path doesn't start with
    // the mountpoint in a file path sense
    // (e.g. path is /a/bc and mountpoint is /a/b), and will be ignored
    const unsigned mountpoint_len = i->mountpoint.GetLength();
    if (path_len > mountpoint_len && path.GetChars()[mountpoint_len] != '/')
      continue;

    // Found a nested catalog transition point
    if (!is_listable && (path_len == mountpoint_len))
      break;

    Unlock();
    LogCvmfs(kLogCatalog, kLogDebug, "staging nested catalog at %s (%s)",
             i->mountpoint.c_str(), i->hash.ToString().c_str());
    StageNestedCatalogByHash(i->hash, i->mountpoint);
    return;
  }
  Unlock();
}

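// Attaches all nested catalogs between entry_point and the catalog that
// serves the given path. If leaf_catalog is NULL, only checks whether a
// fitting nested catalog exists; otherwise the deepest attached catalog is
// returned in *leaf_catalog. Returns false if a required catalog could not
// be loaded.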
template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::MountSubtree(const PathString &path,
                                                    const CatalogT *entry_point,
                                                    bool is_listable,
                                                    CatalogT **leaf_catalog) {
  bool result = true;
  CatalogT *parent = (entry_point == NULL)
                         ? GetRootCatalog()
                         : const_cast<CatalogT *>(entry_point);
  assert(path.StartsWith(parent->mountpoint()));

  unsigned path_len = path.GetLength();

  // Try to find path as a super string of nested catalog mount points
  perf::Inc(statistics_.n_nested_listing);
  typedef typename CatalogT::NestedCatalogList NestedCatalogList;
  const NestedCatalogList &nested_catalogs = parent->ListNestedCatalogs();
  for (typename NestedCatalogList::const_iterator i = nested_catalogs.begin(),
                                                  iEnd = nested_catalogs.end();
       i != iEnd;
       ++i) {
    // Next nesting level
    if (path.StartsWith(i->mountpoint)) {
      // in this case the path doesn't start with
      // the mountpoint in a file path sense
      // (e.g. path is /a/bc and mountpoint is /a/b), and will be ignored
      unsigned mountpoint_len = i->mountpoint.GetLength();
      if (path_len > mountpoint_len && path.GetChars()[mountpoint_len] != '/')
        continue;

      // Found a nested catalog transition point
      if (!is_listable && (path_len == mountpoint_len))
        break;

      if (leaf_catalog == NULL)
        return true;
      CatalogT *new_nested;
      LogCvmfs(kLogCatalog, kLogDebug, "load nested catalog at %s",
               i->mountpoint.c_str());
      // prevent endless recursion with corrupted catalogs
      // (due to reloading root)
      if (i->hash.IsNull())
        return false;
      new_nested = MountCatalog(i->mountpoint, i->hash, parent);
      if (!new_nested)
        return false;

      result = MountSubtree(path, new_nested, is_listable, &parent);
      break;
    }
  }

  if (leaf_catalog == NULL)
    return false;
  *leaf_catalog = parent;
  return result;
}


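// Loads and attaches the catalog for the given mountpoint unless it is
// attached already. Returns NULL if the catalog could not be loaded or
// attached. If the number of attached catalogs exceeds the configured
// watermark, sibling subtrees are detached.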
template<class CatalogT>
CatalogT *AbstractCatalogManager<CatalogT>::MountCatalog(
    const PathString &mountpoint,
    const shash::Any &hash,
    CatalogT *parent_catalog) {
  CatalogT *attached_catalog = NULL;
  if (IsAttached(mountpoint, &attached_catalog)) {
    return attached_catalog;
  }

  CatalogContext ctlg_context(hash, mountpoint, kCtlgLocationMounted);

  if (ctlg_context.IsRootCatalog() && hash.IsNull()) {
    if (GetNewRootCatalogContext(&ctlg_context) == kLoadFail) {
      LogCvmfs(kLogCatalog, kLogDebug,
               "failed to retrieve valid root catalog '%s'",
               mountpoint.c_str());
      return NULL;
    }
  }

  const LoadReturn retval = LoadCatalogByHash(&ctlg_context);
  if ((retval == kLoadFail) || (retval == kLoadNoSpace)) {
    LogCvmfs(kLogCatalog, kLogDebug, "failed to load catalog '%s' (%d - %s)",
             mountpoint.c_str(), retval, Code2Ascii(retval));
    return NULL;
  }

  attached_catalog = CreateCatalog(
      ctlg_context.mountpoint(), ctlg_context.hash(), parent_catalog);

  // Attach loaded catalog
  if (!AttachCatalog(ctlg_context.sqlite_path(), attached_catalog)) {
    LogCvmfs(kLogCatalog, kLogDebug, "failed to attach catalog '%s'",
             mountpoint.c_str());
    UnloadCatalog(attached_catalog);
    return NULL;
  }

  if ((catalog_watermark_ > 0) && (catalogs_.size() >= catalog_watermark_)) {
    DetachSiblings(mountpoint);
  }

  return attached_catalog;
}


template<class CatalogT>
CatalogT *AbstractCatalogManager<CatalogT>::LoadFreeCatalog(
    const PathString &mountpoint, const shash::Any &hash) {
  assert(!hash.IsNull());
  CatalogContext ctlg_context(hash, mountpoint, kCtlgNoLocationNeeded);

  const LoadReturn load_ret = LoadCatalogByHash(&ctlg_context);

  if (load_ret != kLoadNew) {
    return NULL;
  }

  CatalogT *catalog = CatalogT::AttachFreely(
      mountpoint.ToString(), ctlg_context.sqlite_path(), ctlg_context.hash());
  catalog->TakeDatabaseFileOwnership();
  return catalog;
}


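// Opens the catalog database at db_path, assigns its inode range and owner
// maps, and registers the catalog in the catalog tree. For the root catalog
// it also caches revision, timestamp, authz and the volatile flag.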
template<class CatalogT>
bool AbstractCatalogManager<CatalogT>::AttachCatalog(const std::string &db_path,
                                                     CatalogT *new_catalog) {
  LogCvmfs(kLogCatalog, kLogDebug, "attaching catalog file %s",
           db_path.c_str());

  // Initialize the new catalog
  if (!new_catalog->OpenDatabase(db_path)) {
    LogCvmfs(kLogCatalog, kLogDebug, "initialization of catalog %s failed",
             db_path.c_str());
    return false;
  }

  // Determine the inode offset of this catalog
  uint64_t inode_chunk_size = new_catalog->max_row_id();
  InodeRange range = AcquireInodes(inode_chunk_size);
  new_catalog->set_inode_range(range);
  new_catalog->SetInodeAnnotation(inode_annotation_);
  new_catalog->SetOwnerMaps(&uid_map_, &gid_map_);

  // Add catalog to the manager
  if (!new_catalog->IsInitialized()) {
    LogCvmfs(kLogCatalog, kLogDebug,
             "catalog initialization failed (obscure data)");
    inode_gauge_ -= inode_chunk_size;
    return false;
  }
  CheckInodeWatermark();

  // The revision of the catalog tree is given by the root catalog revision
  if (catalogs_.empty()) {
    revision_cache_ = new_catalog->GetRevision();
    timestamp_cache_ = new_catalog->GetLastModified();
    statistics_.catalog_revision->Set(revision_cache_);
    has_authz_cache_ = new_catalog->GetVOMSAuthz(&authz_cache_);
    volatile_flag_ = new_catalog->volatile_flag();
  }

  catalogs_.push_back(new_catalog);
  ActivateCatalog(new_catalog);
  return true;
}


template<class CatalogT>
void AbstractCatalogManager<CatalogT>::DetachCatalog(CatalogT *catalog) {
  if (catalog->HasParent())
    catalog->parent()->RemoveChild(catalog);

  ReleaseInodes(catalog->inode_range());
  UnloadCatalog(catalog);

  // Delete catalog from internal lists
  typename CatalogList::iterator i;
  typename CatalogList::const_iterator iend;
  for (i = catalogs_.begin(), iend = catalogs_.end(); i != iend; ++i) {
    if (*i == catalog) {
      catalogs_.erase(i);
      delete catalog;
      return;
    }
  }

  assert(false);
}


template<class CatalogT>
void AbstractCatalogManager<CatalogT>::DetachSubtree(CatalogT *catalog) {
  // Detach all child catalogs recursively
  typename CatalogList::const_iterator i;
  typename CatalogList::const_iterator iend;
  CatalogList catalogs_to_detach = catalog->GetChildren();
  for (i = catalogs_to_detach.begin(), iend = catalogs_to_detach.end();
       i != iend; ++i) {
    DetachSubtree(*i);
  }

  DetachCatalog(catalog);
}


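// Detaches all catalog subtrees that are not on the path of current_tree;
// used to keep the number of attached catalogs below the catalog watermark.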
template<class CatalogT>
void AbstractCatalogManager<CatalogT>::DetachSiblings(
    const PathString &current_tree) {
  bool again;
  do {
    again = false;
    unsigned N = catalogs_.size();
    for (unsigned i = 0; i < N; ++i) {
      if (!HasPrefix(current_tree.ToString(),
                     catalogs_[i]->mountpoint().ToString(),
                     false /* ignore_case */)) {
        DetachSubtree(catalogs_[i]);
        again = true;
        break;
      }
    }
  } while (again);
  perf::Inc(statistics_.n_detach_siblings);
}


template<class CatalogT>
std::string AbstractCatalogManager<CatalogT>::PrintHierarchyRecursively(
    const CatalogT *catalog, const int level) const {
  string output;

  // Indent according to level
  for (int i = 0; i < level; ++i)
    output += " ";

  output += "-> "
            + string(catalog->mountpoint().GetChars(),
                     catalog->mountpoint().GetLength())
            + "\n";

  CatalogList children = catalog->GetChildren();
  typename CatalogList::const_iterator i = children.begin();
  typename CatalogList::const_iterator iend = children.end();
  for (; i != iend; ++i) {
    output += PrintHierarchyRecursively(*i, level + 1);
  }

  return output;
}


template<class CatalogT>
std::string AbstractCatalogManager<CatalogT>::PrintMemStatsRecursively(
    const CatalogT *catalog) const {
  string result = catalog->PrintMemStatistics() + "\n";

  CatalogList children = catalog->GetChildren();
  typename CatalogList::const_iterator i = children.begin();
  typename CatalogList::const_iterator iend = children.end();
  for (; i != iend; ++i) {
    result += PrintMemStatsRecursively(*i);
  }
  return result;
}


template<class CatalogT>
std::string AbstractCatalogManager<CatalogT>::PrintAllMemStatistics() const {
  string result;
  ReadLock();
  result = PrintMemStatsRecursively(GetRootCatalog());
  Unlock();
  return result;
}


// Sets the SQLite soft heap limit once per calling thread (tracked via a
// thread-local key).
template<class CatalogT>
void AbstractCatalogManager<CatalogT>::EnforceSqliteMemLimit() {
  char *mem_enforced = static_cast<char *>(
      pthread_getspecific(pkey_sqlitemem_));
  if (mem_enforced == NULL) {
    sqlite3_soft_heap_limit(kSqliteMemPerThread);
    pthread_setspecific(pkey_sqlitemem_, this);
  }
}

}  // namespace catalog


#endif  // CVMFS_CATALOG_MGR_IMPL_H_
// NOLINTEND
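
Usage note (not part of the file): AbstractCatalogManager is generic; a
concrete catalog manager derives from it and supplies the loader hooks called
above (GetNewRootCatalogContext, LoadCatalogByHash, CreateCatalog,
StageNestedCatalogByHash, UnloadCatalog, ActivateCatalog). The sketch below
only illustrates the call order implied by this file; ExampleUsage and the
catalog::kLookupDefault option constant are assumptions made for
illustration, not code taken from the repository.

#include "catalog_mgr.h"

// mgr must point to an instance of a concrete subclass of
// catalog::AbstractCatalogManager<catalog::Catalog>.
void ExampleUsage(catalog::AbstractCatalogManager<catalog::Catalog> *mgr) {
  // Attach the root catalog; returns false if no valid root catalog is found.
  if (!mgr->Init())
    return;

  // Path lookup; a nested catalog covering the path is mounted on demand.
  catalog::DirectoryEntry dirent;
  if (mgr->LookupPath(PathString("/software/v1", 12),
                      catalog::kLookupDefault, &dirent)) {
    // ... use dirent ...
  }

  // Directory listing; the nested catalog serving the directory is attached
  // as well because the listing crosses the transition point.
  catalog::DirectoryEntryList listing;
  mgr->Listing(PathString("/software", 9), &listing,
               true /* expand_symlink */);

  // Later: pick up a newly published root catalog, if any.
  mgr->Remount();
}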