GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/cvmfs.cc
Date: 2025-07-21 10:50:29
            Exec   Total   Coverage
Lines:         0    1606       0.0%
Branches:      0    2300       0.0%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 *
4 * CernVM-FS is a FUSE module which implements an HTTP read-only filesystem.
5 * The original idea is based on GROW-FS.
6 *
7 * CernVM-FS shows a remote HTTP directory as a local file system. The client
8 * sees all available files. On first access, a file is downloaded and
9 * cached locally. All downloaded pieces are verified by a cryptographic
10 * content hash.
11 *
12 * To do so, a directory hive has to be transformed into a CVMFS2
13 * "repository". This can be done by the CernVM-FS server tools.
14 *
15 * This preparation of directories is transparent to web servers and
16 * web proxies. They just serve static content, i.e. arbitrary files.
17 * Any HTTP server should do the job. We use Apache + Squid. Serving
18 * files from the memory of a web proxy brings a significant performance
19 * improvement.
20 */
21
22 // TODO(jblomer): the file system root should probably always return 1 for an
23 // inode. See also integration test #23.
24
25 #define ENOATTR ENODATA /**< instead of including attr/xattr.h */
26
27 #ifndef __STDC_FORMAT_MACROS
28 #define __STDC_FORMAT_MACROS
29 #endif
30
31 // sys/xattr.h conflicts with linux/xattr.h and needs to be loaded very early
32 // clang-format off
33 #include <sys/xattr.h> // NOLINT
34 // clang-format on
35
36
37 #include "cvmfs.h"
38
39 #include <alloca.h>
40 #include <errno.h>
41 #include <fcntl.h>
42 #include <inttypes.h>
43 #include <pthread.h>
44 #include <stddef.h>
45 #include <stdint.h>
46 #include <sys/errno.h>
47 #include <sys/statvfs.h>
48 #include <sys/types.h>
49 #include <unistd.h>
50
51 #include <algorithm>
52 #include <cassert>
53 #include <cstdio>
54 #include <cstdlib>
55 #include <cstring>
56 #include <ctime>
57 #include <functional>
58 #include <google/dense_hash_map>
59 #include <string>
60 #include <utility>
61 #include <vector>
62
63 #include "authz/authz_session.h"
64 #include "auto_umount.h"
65 #include "backoff.h"
66 #include "bigvector.h"
67 #include "cache.h"
68 #include "cache_posix.h"
69 #include "cache_stream.h"
70 #include "catalog_mgr.h"
71 #include "catalog_mgr_client.h"
72 #include "clientctx.h"
73 #include "compat.h"
74 #include "compression/compression.h"
75 #include "crypto/crypto_util.h"
76 #include "crypto/hash.h"
77 #include "directory_entry.h"
78 #include "duplex_fuse.h"
79 #include "fence.h"
80 #include "fetch.h"
81 #include "file_chunk.h"
82 #include "fuse_evict.h"
83 #include "fuse_inode_gen.h"
84 #include "fuse_remount.h"
85 #include "glue_buffer.h"
86 #include "interrupt.h"
87 #include "loader.h"
88 #include "lru_md.h"
89 #include "magic_xattr.h"
90 #include "manifest_fetch.h"
91 #include "monitor.h"
92 #include "mountpoint.h"
93 #include "network/download.h"
94 #include "nfs_maps.h"
95 #include "notification_client.h"
96 #include "options.h"
97 #include "quota_listener.h"
98 #include "quota_posix.h"
99 #include "sanitizer.h"
100 #include "shortstring.h"
101 #include "sqlitevfs.h"
102 #include "statistics.h"
103 #include "talk.h"
104 #include "telemetry_aggregator.h"
105 #include "tracer.h"
106 #include "util/algorithm.h"
107 #include "util/exception.h"
108 #include "util/logging.h"
109 #include "util/mutex.h"
110 #include "util/pointer.h"
111 #include "util/posix.h"
112 #include "util/smalloc.h"
113 #include "util/string.h"
114 #include "util/testing.h"
115 #include "util/uuid.h"
116 #include "wpad.h"
117 #include "xattr.h"
118
119 using namespace std; // NOLINT
120
121 namespace cvmfs {
122
123 #ifndef __TEST_CVMFS_MOCKFUSE // will be mocked in tests
124 FileSystem *file_system_ = NULL;
125 MountPoint *mount_point_ = NULL;
126 TalkManager *talk_mgr_ = NULL;
127 NotificationClient *notification_client_ = NULL;
128 Watchdog *watchdog_ = NULL;
129 FuseRemounter *fuse_remounter_ = NULL;
130 InodeGenerationInfo inode_generation_info_;
131 #endif // __TEST_CVMFS_MOCKFUSE
132
133
134 /**
135 * For cvmfs_opendir / cvmfs_readdir
136 * TODO: use mmap for very large listings
137 */
138 struct DirectoryListing {
139 char *buffer; /**< Filled by fuse_add_direntry */
140
141 // Not really used anymore. But directory listing needs to be migrated during
142 // hotpatch. If buffer is allocated by smmap, capacity is zero.
143 size_t size;
144 size_t capacity;
145
146 DirectoryListing() : buffer(NULL), size(0), capacity(0) { }
147 };
148
149 const loader::LoaderExports *loader_exports_ = NULL;
150 OptionsManager *options_mgr_ = NULL;
151 pid_t pid_ = 0; /**< will be set after daemon() */
152 quota::ListenerHandle *watchdog_listener_ = NULL;
153 quota::ListenerHandle *unpin_listener_ = NULL;
154
155
156 typedef google::dense_hash_map<uint64_t, DirectoryListing,
157 hash_murmur<uint64_t> >
158 DirectoryHandles;
159 DirectoryHandles *directory_handles_ = NULL;
160 pthread_mutex_t lock_directory_handles_ = PTHREAD_MUTEX_INITIALIZER;
161 uint64_t next_directory_handle_ = 0;
162
163 unsigned int max_open_files_; /**< maximum allowed number of open files */
164 /**
165 * The refcounted cache manager should suppress checking the current number
166 * of files opened through cvmfs_open() against the process' file descriptor
167 * limit.
168 */
169 bool check_fd_overflow_ = true;
170 /**
171 * Number of reserved file descriptors for internal use
172 */
173 const int kNumReservedFd = 512;
174 /**
175 * Warn if the process has a lower limit for the number of open file descriptors
176 */
177 const unsigned int kMinOpenFiles = 8192;
178
179
180 class FuseInterruptCue : public InterruptCue {
181 public:
182 explicit FuseInterruptCue(fuse_req_t *r) : req_ptr_(r) { }
183 virtual ~FuseInterruptCue() { }
184 virtual bool IsCanceled() { return fuse_req_interrupted(*req_ptr_); }
185
186 private:
187 fuse_req_t *req_ptr_;
188 };
189
190 /**
191 * Options related to the fuse kernel connection. The capabilities are
192 * determined only once at mount time. If a capability triggers certain
193 * behavior of the cvmfs fuse module, it needs to be re-triggered on reload.
194 * Used in SaveState and RestoreState to store the details of symlink caching.
195 */
196 struct FuseState {
197 FuseState() : version(0), cache_symlinks(false), has_dentry_expire(false) { }
198 unsigned version;
199 bool cache_symlinks;
200 bool has_dentry_expire;
201 };
202
203
204 /**
205 * Atomic increase of the open files counter. If we use a non-refcounted
206 * POSIX cache manager, check for open fd overflow. Return false if too many
207 * files are opened. Otherwise return true (success).
208 */
209 static inline bool IncAndCheckNoOpenFiles() {
210 const int64_t no_open_files = perf::Xadd(file_system_->no_open_files(), 1);
211 if (!check_fd_overflow_)
212 return true;
213 return no_open_files < (static_cast<int>(max_open_files_) - kNumReservedFd);
214 }
215
216 static inline double GetKcacheTimeout() {
217 if (!fuse_remounter_->IsCaching())
218 return 0.0;
219 return mount_point_->kcache_timeout_sec();
220 }
221
222
223 void GetReloadStatus(bool *drainout_mode, bool *maintenance_mode) {
224 *drainout_mode = fuse_remounter_->IsInDrainoutMode();
225 *maintenance_mode = fuse_remounter_->IsInMaintenanceMode();
226 }
227
228 #ifndef __TEST_CVMFS_MOCKFUSE // will be mocked in tests
229 static bool UseWatchdog() {
230 if (loader_exports_ == NULL || loader_exports_->version < 2) {
231 return true; // spawn watchdog by default
232 // Note: with library versions before 2.1.8 it might not
233 // create stack traces properly in all cases
234 }
235
236 return !loader_exports_->disable_watchdog;
237 }
238 #endif
239
240 std::string PrintInodeGeneration() {
241 return "init-catalog-revision: "
242 + StringifyInt(inode_generation_info_.initial_revision) + " "
243 + "current-catalog-revision: "
244 + StringifyInt(mount_point_->catalog_mgr()->GetRevision()) + " "
245 + "incarnation: " + StringifyInt(inode_generation_info_.incarnation)
246 + " " + "inode generation: "
247 + StringifyInt(inode_generation_info_.inode_generation) + "\n";
248 }
249
250
251 static bool CheckVoms(const fuse_ctx &fctx) {
252 if (!mount_point_->has_membership_req())
253 return true;
254 const string mreq = mount_point_->membership_req();
255 LogCvmfs(kLogCvmfs, kLogDebug,
256 "Got VOMS authz %s from filesystem "
257 "properties",
258 mreq.c_str());
259
260 if (fctx.uid == 0)
261 return true;
262
263 return mount_point_->authz_session_mgr()->IsMemberOf(fctx.pid, mreq);
264 }
265
266 static bool MayBeInPageCacheTracker(const catalog::DirectoryEntry &dirent) {
267 return dirent.IsRegular()
268 && (dirent.inode() < mount_point_->catalog_mgr()->GetRootInode());
269 }
270
271 static bool HasDifferentContent(const catalog::DirectoryEntry &dirent,
272 const shash::Any &hash,
273 const struct stat &info) {
274 if (hash == dirent.checksum())
275 return false;
276 // For chunked files, we don't want to load the full list of chunk hashes
277 // so we only check the last modified timestamp
278 if (dirent.IsChunkedFile() && (info.st_mtime == dirent.mtime()))
279 return false;
280 return true;
281 }
282
283 #ifndef __TEST_CVMFS_MOCKFUSE
284 /**
285 * When we lookup an inode (cvmfs_lookup(), cvmfs_opendir()), we usually provide
286 * the live inode, i.e. the one in the inode tracker. However, if the inode
287 * refers to an open file that has a different content than the one from the
288 * current catalogs, we will replace the live inode in the tracker by the one
289 * from the current generation.
290 *
291 * To still access the old inode, e.g. for fstat() on the open file, the stat
292 * structure connected to this inode is taken from the page cache tracker.
293 */
294 static bool FixupOpenInode(const PathString &path,
295 catalog::DirectoryEntry *dirent) {
296 if (!MayBeInPageCacheTracker(*dirent))
297 return false;
298
299 CVMFS_TEST_INJECT_BARRIER("_CVMFS_TEST_BARRIER_INODE_REPLACE");
300
301 const bool is_stale = mount_point_->page_cache_tracker()->IsStale(*dirent);
302
303 if (is_stale) {
304 // Overwrite dirent with inode from current generation
305 const bool found = mount_point_->catalog_mgr()->LookupPath(
306 path, catalog::kLookupDefault, dirent);
307 assert(found);
308 }
309
310 return is_stale;
311 }
312
313 static bool GetDirentForInode(const fuse_ino_t ino,
314 catalog::DirectoryEntry *dirent) {
315 // Lookup inode in cache
316 if (mount_point_->inode_cache()->Lookup(ino, dirent))
317 return true;
318
319 // Look in the catalogs in 2 steps: lookup inode->path, lookup path
320 static const catalog::DirectoryEntry
321 dirent_negative = catalog::DirectoryEntry(catalog::kDirentNegative);
322 // Reset directory entry. If the function returns false and dirent is not
323 // the kDirentNegative, it was an I/O error
324 *dirent = catalog::DirectoryEntry();
325
326 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
327
328 if (file_system_->IsNfsSource()) {
329 // NFS mode
330 PathString path;
331 const bool retval = file_system_->nfs_maps()->GetPath(ino, &path);
332 if (!retval) {
333 *dirent = dirent_negative;
334 return false;
335 }
336 if (catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent)) {
337 // Fix inodes
338 dirent->set_inode(ino);
339 mount_point_->inode_cache()->Insert(ino, *dirent);
340 return true;
341 }
342 return false; // Not found in catalog or catalog load error
343 }
344
345 // Non-NFS mode
346 PathString path;
347 if (ino == catalog_mgr->GetRootInode()) {
348 const bool retval = catalog_mgr->LookupPath(
349 PathString(), catalog::kLookupDefault, dirent);
350
351 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
352 "GetDirentForInode: Race condition? Not found dirent %s",
353 dirent->name().c_str())) {
354 return false;
355 }
356
357 dirent->set_inode(ino);
358 mount_point_->inode_cache()->Insert(ino, *dirent);
359 return true;
360 }
361
362 glue::InodeEx inode_ex(ino, glue::InodeEx::kUnknownType);
363 const bool retval = mount_point_->inode_tracker()->FindPath(&inode_ex, &path);
364 if (!retval) {
365 // This may be a retired inode whose stat information is only available
366 // in the page cache tracker because there is still an open file
367 LogCvmfs(kLogCvmfs, kLogDebug,
368 "GetDirentForInode inode lookup failure %" PRId64, ino);
369 *dirent = dirent_negative;
370 // Indicate that the inode was not found in the tracker rather than not
371 // found in the catalog
372 dirent->set_inode(ino);
373 return false;
374 }
375 if (catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent)) {
376 if (!inode_ex.IsCompatibleFileType(dirent->mode())) {
377 LogCvmfs(kLogCvmfs, kLogDebug,
378 "Warning: inode %" PRId64 " (%s) changed file type", ino,
379 path.c_str());
380 // TODO(jblomer): we detect this issue but let it continue unhandled.
381 // Fix me.
382 }
383
384 // Fix inodes
385 dirent->set_inode(ino);
386 mount_point_->inode_cache()->Insert(ino, *dirent);
387 return true;
388 }
389
390 // Can happen after reload of catalogs or on catalog load failure
391 LogCvmfs(kLogCvmfs, kLogDebug, "GetDirentForInode path lookup failure");
392 return false;
393 }
394
395
396 /**
397 * Returns 0 if the path does not exist,
398 * 1 if the live inode is returned,
399 * >1 the (now stale) live inode of an open file; the inode in dirent
400 * then comes from the catalog of the current generation
401 * (see FixupOpenInode)
402 */
403 static uint64_t GetDirentForPath(const PathString &path,
404 catalog::DirectoryEntry *dirent) {
405 uint64_t live_inode = 0;
406 if (!file_system_->IsNfsSource())
407 live_inode = mount_point_->inode_tracker()->FindInode(path);
408
409 LogCvmfs(kLogCvmfs, kLogDebug,
410 "GetDirentForPath: live inode for %s: %" PRIu64, path.c_str(),
411 live_inode);
412
413 const shash::Md5 md5path(path.GetChars(), path.GetLength());
414 if (mount_point_->md5path_cache()->Lookup(md5path, dirent)) {
415 if (dirent->GetSpecial() == catalog::kDirentNegative)
416 return 0;
417 // We may have initially stored the entry with an old inode in the
418 // md5path cache and now should update it with the new one.
419 if (!file_system_->IsNfsSource() && (live_inode != 0))
420 dirent->set_inode(live_inode);
421 return 1;
422 }
423
424 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
425
426 // Lookup inode in catalog. TODO: avoid computing the md5 path hash twice
427 bool retval;
428 retval = catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent);
429 if (retval) {
430 if (file_system_->IsNfsSource()) {
431 dirent->set_inode(file_system_->nfs_maps()->GetInode(path));
432 } else if (live_inode != 0) {
433 dirent->set_inode(live_inode);
434 if (FixupOpenInode(path, dirent)) {
435 LogCvmfs(kLogCvmfs, kLogDebug,
436 "content of %s change, replacing inode %" PRIu64
437 " --> %" PRIu64,
438 path.c_str(), live_inode, dirent->inode());
439 return live_inode;
440 // Do not populate the md5path cache until the inode tracker is fixed
441 }
442 }
443 mount_point_->md5path_cache()->Insert(md5path, *dirent);
444 return 1;
445 }
446
447 LogCvmfs(kLogCvmfs, kLogDebug, "GetDirentForPath, no entry");
448 // Only insert ENOENT results into negative cache. Otherwise it was an
449 // error loading nested catalogs
450 if (dirent->GetSpecial() == catalog::kDirentNegative)
451 mount_point_->md5path_cache()->InsertNegative(md5path);
452 return 0;
453 }
454 #endif
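
   The three-valued return of GetDirentForPath() above is interpreted by its
   callers roughly as in the following sketch (hypothetical caller, not part of
   cvmfs.cc; it mirrors the handling in cvmfs_lookup() further down):

     catalog::DirectoryEntry dirent;
     const uint64_t live_inode = GetDirentForPath(path, &dirent);
     if (live_inode == 0) {
       // Path does not exist, or a catalog load error (check dirent.GetSpecial())
     } else if (live_inode == 1) {
       // dirent.inode() is the live inode from the inode tracker
     } else {
       // live_inode is the stale inode of an open file; dirent.inode() holds the
       // inode from the current catalog generation (see FixupOpenInode)
     }
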
455
456
457 static bool GetPathForInode(const fuse_ino_t ino, PathString *path) {
458 // Check the path cache first
459 if (mount_point_->path_cache()->Lookup(ino, path))
460 return true;
461
462 if (file_system_->IsNfsSource()) {
463 // NFS mode, just a lookup
464 LogCvmfs(kLogCvmfs, kLogDebug, "MISS %lu - lookup in NFS maps", ino);
465 if (file_system_->nfs_maps()->GetPath(ino, path)) {
466 mount_point_->path_cache()->Insert(ino, *path);
467 return true;
468 }
469 return false;
470 }
471
472 if (ino == mount_point_->catalog_mgr()->GetRootInode())
473 return true;
474
475 LogCvmfs(kLogCvmfs, kLogDebug, "MISS %lu - looking in inode tracker", ino);
476 glue::InodeEx inode_ex(ino, glue::InodeEx::kUnknownType);
477 const bool retval = mount_point_->inode_tracker()->FindPath(&inode_ex, path);
478
479 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
480 "GetPathForInode: Race condition? "
481 "Inode not found in inode tracker at path %s",
482 path->c_str())) {
483 return false;
484 }
485
486
487 mount_point_->path_cache()->Insert(ino, *path);
488 return true;
489 }
490
491 static void DoTraceInode(const int event,
492 fuse_ino_t ino,
493 const std::string &msg) {
494 PathString path;
495 const bool found = GetPathForInode(ino, &path);
496 if (!found) {
497 LogCvmfs(kLogCvmfs, kLogDebug,
498 "Tracing: Could not find path for inode %" PRIu64, uint64_t(ino));
499 mount_point_->tracer()->Trace(event, PathString("@UNKNOWN"), msg);
500 } else {
501 mount_point_->tracer()->Trace(event, path, msg);
502 }
503 }
504
505 static void inline TraceInode(const int event,
506 fuse_ino_t ino,
507 const std::string &msg) {
508 if (mount_point_->tracer()->IsActive())
509 DoTraceInode(event, ino, msg);
510 }
511
512 /**
513 * Find the inode number of a file name in a directory given by inode.
514 * This or getattr is called as a kind of prerequisite to every operation.
515 * We do check catalog TTL here (and reload, if necessary).
516 */
517 static void cvmfs_lookup(fuse_req_t req, fuse_ino_t parent, const char *name) {
518 const HighPrecisionTimer guard_timer(file_system_->hist_fs_lookup());
519
520 perf::Inc(file_system_->n_fs_lookup());
521 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
522 FuseInterruptCue ic(&req);
523 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
524 &ic);
525 fuse_remounter_->TryFinish();
526
527 fuse_remounter_->fence()->Enter();
528 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
529
530 const fuse_ino_t parent_fuse = parent;
531 parent = catalog_mgr->MangleInode(parent);
532 LogCvmfs(kLogCvmfs, kLogDebug,
533 "cvmfs_lookup in parent inode: %" PRIu64 " for name: %s",
534 uint64_t(parent), name);
535
536 PathString path;
537 PathString parent_path;
538 uint64_t live_inode = 0;
539 catalog::DirectoryEntry dirent;
540 struct fuse_entry_param result;
541
542 memset(&result, 0, sizeof(result));
543 const double timeout = GetKcacheTimeout();
544 result.attr_timeout = timeout;
545 result.entry_timeout = timeout;
546
547 // Special NFS lookups: . and ..
548 if ((strcmp(name, ".") == 0) || (strcmp(name, "..") == 0)) {
549 if (GetDirentForInode(parent, &dirent)) {
550 if (strcmp(name, ".") == 0) {
551 goto lookup_reply_positive;
552 } else {
553 // Lookup for ".."
554 if (dirent.inode() == catalog_mgr->GetRootInode()) {
555 dirent.set_inode(1);
556 goto lookup_reply_positive;
557 }
558 if (!GetPathForInode(parent, &parent_path))
559 goto lookup_reply_negative;
560 if (GetDirentForPath(GetParentPath(parent_path), &dirent) > 0)
561 goto lookup_reply_positive;
562 }
563 }
564 // No entry for "." or no entry for ".."
565 if (dirent.GetSpecial() == catalog::kDirentNegative)
566 goto lookup_reply_negative;
567 else
568 goto lookup_reply_error;
569 assert(false);
570 }
571
572 if (!GetPathForInode(parent, &parent_path)) {
573 LogCvmfs(kLogCvmfs, kLogDebug, "no path for parent inode found");
574 goto lookup_reply_negative;
575 }
576
577 path.Assign(parent_path);
578 path.Append("/", 1);
579 path.Append(name, strlen(name));
580 live_inode = GetDirentForPath(path, &dirent);
581 if (live_inode == 0) {
582 if (dirent.GetSpecial() == catalog::kDirentNegative)
583 goto lookup_reply_negative;
584 else
585 goto lookup_reply_error;
586 }
587
588 lookup_reply_positive:
589 mount_point_->tracer()->Trace(Tracer::kEventLookup, path, "lookup()");
590 if (!file_system_->IsNfsSource()) {
591 if (live_inode > 1) {
592 // live inode is stale (open file), we replace it
593 assert(dirent.IsRegular());
594 assert(dirent.inode() != live_inode);
595
596 // The new inode is put in the tracker with refcounter == 0
597 const bool replaced = mount_point_->inode_tracker()->ReplaceInode(
598 live_inode, glue::InodeEx(dirent.inode(), dirent.mode()));
599 if (replaced)
600 perf::Inc(file_system_->n_fs_inode_replace());
601 }
602 mount_point_->inode_tracker()->VfsGet(
603 glue::InodeEx(dirent.inode(), dirent.mode()), path);
604 }
605 // We do _not_ track (and evict) positive replies; among other things, test
606 // 076 fails with the following line uncommented
607 //
608 // WARNING! ENABLING THIS BREAKS ANY TYPE OF MOUNTPOINT POINTING TO THIS INODE
609 //
610 // only safe if fuse_expire_entry is available
611 if (mount_point_->fuse_expire_entry()
612 || (mount_point_->cache_symlinks() && dirent.IsLink())) {
613 LogCvmfs(kLogCache, kLogDebug, "Dentry to evict: %s", name);
614 mount_point_->dentry_tracker()->Add(parent_fuse, name,
615 static_cast<uint64_t>(timeout));
616 }
617
618 fuse_remounter_->fence()->Leave();
619 result.ino = dirent.inode();
620 result.attr = dirent.GetStatStructure();
621 fuse_reply_entry(req, &result);
622 return;
623
624 lookup_reply_negative:
625 mount_point_->tracer()->Trace(Tracer::kEventLookup, path,
626 "lookup()-NOTFOUND");
627 // Will be a no-op if there is no fuse cache eviction
628 mount_point_->dentry_tracker()->Add(parent_fuse, name, uint64_t(timeout));
629 fuse_remounter_->fence()->Leave();
630 perf::Inc(file_system_->n_fs_lookup_negative());
631 result.ino = 0;
632 fuse_reply_entry(req, &result);
633 return;
634
635 lookup_reply_error:
636 mount_point_->tracer()->Trace(Tracer::kEventLookup, path,
637 "lookup()-NOTFOUND");
638 fuse_remounter_->fence()->Leave();
639
640 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
641 "EIO (01): lookup failed for %s", name);
642 perf::Inc(file_system_->n_eio_total());
643 perf::Inc(file_system_->n_eio_01());
644
645 fuse_reply_err(req, EIO);
646 }
647
648
649 /**
650 * Decreases the inode's lookup count by nlookup (number of forgotten lookups).
651 */
652 static void cvmfs_forget(fuse_req_t req,
653 fuse_ino_t ino,
654 #if CVMFS_USE_LIBFUSE == 2
655 unsigned long nlookup // NOLINT
656 #else
657 uint64_t nlookup
658 #endif
659 ) {
660 const HighPrecisionTimer guard_timer(file_system_->hist_fs_forget());
661
662 perf::Inc(file_system_->n_fs_forget());
663
664 // The libfuse high-level library does the same
665 if (ino == FUSE_ROOT_ID) {
666 fuse_reply_none(req);
667 return;
668 }
669
670 // Ensure that we don't need to call catalog_mgr()->MangleInode(ino)
671 assert(ino > mount_point_->catalog_mgr()->kInodeOffset);
672
673 LogCvmfs(kLogCvmfs, kLogDebug, "forget on inode %" PRIu64 " by %" PRIu64,
674 uint64_t(ino), nlookup);
675
676 if (!file_system_->IsNfsSource()) {
677 const bool removed = mount_point_->inode_tracker()->GetVfsPutRaii().VfsPut(
678 ino, nlookup);
679 if (removed)
680 mount_point_->page_cache_tracker()->GetEvictRaii().Evict(ino);
681 }
682
683 fuse_reply_none(req);
684 }
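
   The nlookup argument is the number of kernel lookup references dropped at
   once. A self-contained model of the reference counting that VfsGet()/VfsPut()
   are assumed to perform (simplified illustration, not the actual
   glue::InodeTracker code):

     #include <stdint.h>
     #include <map>

     std::map<uint64_t, uint64_t> refcounts;  // inode --> outstanding lookups

     void VfsGet(uint64_t ino) { ++refcounts[ino]; }    // on every cvmfs_lookup
     bool VfsPut(uint64_t ino, uint64_t nlookup) {      // on cvmfs_forget
       std::map<uint64_t, uint64_t>::iterator it = refcounts.find(ino);
       if (it == refcounts.end()) return false;
       it->second = (it->second > nlookup) ? (it->second - nlookup) : 0;
       if (it->second > 0) return false;
       refcounts.erase(it);  // last reference gone: path info can be evicted
       return true;
     }
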
685
686
687 #if (FUSE_VERSION >= 29)
688 static void cvmfs_forget_multi(fuse_req_t req,
689 size_t count,
690 struct fuse_forget_data *forgets) {
691 const HighPrecisionTimer guard_timer(file_system_->hist_fs_forget_multi());
692
693 perf::Xadd(file_system_->n_fs_forget(), count);
694 if (file_system_->IsNfsSource()) {
695 fuse_reply_none(req);
696 return;
697 }
698
699 {
700 glue::InodeTracker::VfsPutRaii vfs_put_raii = mount_point_->inode_tracker()
701 ->GetVfsPutRaii();
702 glue::PageCacheTracker::EvictRaii
703 evict_raii = mount_point_->page_cache_tracker()->GetEvictRaii();
704 for (size_t i = 0; i < count; ++i) {
705 if (forgets[i].ino == FUSE_ROOT_ID) {
706 continue;
707 }
708
709 // Ensure that we don't need to call catalog_mgr()->MangleInode(ino)
710 assert(forgets[i].ino > mount_point_->catalog_mgr()->kInodeOffset);
711 LogCvmfs(kLogCvmfs, kLogDebug, "forget on inode %" PRIu64 " by %" PRIu64,
712 forgets[i].ino, forgets[i].nlookup);
713
714 const bool removed = vfs_put_raii.VfsPut(forgets[i].ino,
715 forgets[i].nlookup);
716 if (removed)
717 evict_raii.Evict(forgets[i].ino);
718 }
719 }
720
721 fuse_reply_none(req);
722 }
723 #endif // FUSE_VERSION >= 29
724
725
726 /**
727 * Looks into dirent to decide if this is an EIO negative reply or an
728 * ENOENT negative reply. We do not need to store the reply in the negative
729 * cache tracker because ReplyNegative is called on inode queries. Inodes,
730 * however, change anyway when a new catalog is applied.
731 */
732 static void ReplyNegative(const catalog::DirectoryEntry &dirent,
733 fuse_req_t req) {
734 if (dirent.GetSpecial() == catalog::kDirentNegative) {
735 fuse_reply_err(req, ENOENT);
736 } else {
737 const char *name = dirent.name().c_str();
738 const char *link = dirent.symlink().c_str();
739
740 LogCvmfs(
741 kLogCvmfs, kLogDebug | kLogSyslogErr,
742 "EIO (02): CVMFS-specific metadata not found for name=%s symlink=%s",
743 name ? name : "<unset>", link ? link : "<unset>");
744
745 perf::Inc(file_system_->n_eio_total());
746 perf::Inc(file_system_->n_eio_02());
747 fuse_reply_err(req, EIO);
748 }
749 }
750
751
752 /**
753 * Transform a cvmfs dirent into a struct stat.
754 */
755 static void cvmfs_getattr(fuse_req_t req, fuse_ino_t ino,
756 struct fuse_file_info *fi) {
757 const HighPrecisionTimer guard_timer(file_system_->hist_fs_getattr());
758
759 perf::Inc(file_system_->n_fs_stat());
760 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
761 FuseInterruptCue ic(&req);
762 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
763 &ic);
764 fuse_remounter_->TryFinish();
765
766 fuse_remounter_->fence()->Enter();
767 ino = mount_point_->catalog_mgr()->MangleInode(ino);
768 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_getattr (stat) for inode: %" PRIu64,
769 uint64_t(ino));
770
771 if (!CheckVoms(*fuse_ctx)) {
772 fuse_remounter_->fence()->Leave();
773 fuse_reply_err(req, EACCES);
774 return;
775 }
776 catalog::DirectoryEntry dirent;
777 const bool found = GetDirentForInode(ino, &dirent);
778 TraceInode(Tracer::kEventGetAttr, ino, "getattr()");
779 if ((!found && (dirent.inode() == ino)) || MayBeInPageCacheTracker(dirent)) {
780 // Serve retired inode from page cache tracker; even if we find it in the
781 // catalog, we replace the dirent by the page cache tracker version to
782 // not confuse open file handles
783 LogCvmfs(kLogCvmfs, kLogDebug,
784 "cvmfs_getattr %" PRIu64 " "
785 "served from page cache tracker",
786 ino);
787 shash::Any hash;
788 struct stat info;
789 const bool is_open = mount_point_->page_cache_tracker()->GetInfoIfOpen(
790 ino, &hash, &info);
791 if (is_open) {
792 fuse_remounter_->fence()->Leave();
793 if (found && HasDifferentContent(dirent, hash, info)) {
794 // We should from now on provide the new inode information instead
795 // of the stale one. To this end, we need to invalidate the dentry to
796 // trigger a fresh LOOKUP call
797 uint64_t parent_ino;
798 NameString name;
799 if (mount_point_->inode_tracker()->FindDentry(dirent.inode(),
800 &parent_ino, &name)) {
801 fuse_remounter_->InvalidateDentry(parent_ino, name);
802 }
803 perf::Inc(file_system_->n_fs_stat_stale());
804 }
805 fuse_reply_attr(req, &info, GetKcacheTimeout());
806 return;
807 }
808 }
809 fuse_remounter_->fence()->Leave();
810
811 if (!found) {
812 ReplyNegative(dirent, req);
813 return;
814 }
815
816 struct stat const info = dirent.GetStatStructure();
817
818 fuse_reply_attr(req, &info, GetKcacheTimeout());
819 }
820
821
822 /**
823 * Reads a symlink from the catalog. Environment variables are expanded.
824 */
825 static void cvmfs_readlink(fuse_req_t req, fuse_ino_t ino) {
826 const HighPrecisionTimer guard_timer(file_system_->hist_fs_readlink());
827
828 perf::Inc(file_system_->n_fs_readlink());
829 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
830 FuseInterruptCue ic(&req);
831 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
832 &ic);
833
834 fuse_remounter_->fence()->Enter();
835 ino = mount_point_->catalog_mgr()->MangleInode(ino);
836 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_readlink on inode: %" PRIu64,
837 uint64_t(ino));
838
839 catalog::DirectoryEntry dirent;
840 const bool found = GetDirentForInode(ino, &dirent);
841 TraceInode(Tracer::kEventReadlink, ino, "readlink()");
842 fuse_remounter_->fence()->Leave();
843
844 if (!found) {
845 ReplyNegative(dirent, req);
846 return;
847 }
848
849 if (!dirent.IsLink()) {
850 fuse_reply_err(req, EINVAL);
851 return;
852 }
853
854 fuse_reply_readlink(req, dirent.symlink().c_str());
855 }
856
857
858 static void AddToDirListing(const fuse_req_t req, const char *name,
859 const struct stat *stat_info,
860 BigVector<char> *listing) {
861 LogCvmfs(kLogCvmfs, kLogDebug, "Add to listing: %s, inode %" PRIu64, name,
862 uint64_t(stat_info->st_ino));
863 size_t remaining_size = listing->capacity() - listing->size();
864 const size_t entry_size = fuse_add_direntry(req, NULL, 0, name, stat_info, 0);
865
866 while (entry_size > remaining_size) {
867 listing->DoubleCapacity();
868 remaining_size = listing->capacity() - listing->size();
869 }
870
871 char *buffer;
872 bool large_alloc;
873 listing->ShareBuffer(&buffer, &large_alloc);
874 fuse_add_direntry(req, buffer + listing->size(), remaining_size, name,
875 stat_info, listing->size() + entry_size);
876 listing->SetSize(listing->size() + entry_size);
877 }
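
   AddToDirListing() uses the usual libfuse two-pass pattern: fuse_add_direntry()
   called with a NULL buffer only reports how many bytes the entry needs. A
   minimal sketch of that pattern with a plain buffer instead of the BigVector
   used here (placeholder variable names):

     size_t AppendDirentry(fuse_req_t req, char *buf, size_t capacity, size_t used,
                           const char *name, const struct stat *info) {
       const size_t needed = fuse_add_direntry(req, NULL, 0, name, info, 0);
       assert(used + needed <= capacity);  // caller must have grown the buffer
       fuse_add_direntry(req, buf + used, capacity - used, name, info,
                         used + needed /* offset of the next entry */);
       return used + needed;
     }
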
878
879
880 /**
881 * Open a directory for listing.
882 */
883 static void cvmfs_opendir(fuse_req_t req, fuse_ino_t ino,
884 struct fuse_file_info *fi) {
885 const HighPrecisionTimer guard_timer(file_system_->hist_fs_opendir());
886
887 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
888 FuseInterruptCue ic(&req);
889 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
890 &ic);
891 fuse_remounter_->TryFinish();
892
893 fuse_remounter_->fence()->Enter();
894 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
895 ino = catalog_mgr->MangleInode(ino);
896 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_opendir on inode: %" PRIu64,
897 uint64_t(ino));
898 if (!CheckVoms(*fuse_ctx)) {
899 fuse_remounter_->fence()->Leave();
900 fuse_reply_err(req, EACCES);
901 return;
902 }
903
904 TraceInode(Tracer::kEventOpenDir, ino, "opendir()");
905 PathString path;
906 catalog::DirectoryEntry d;
907 bool found = GetPathForInode(ino, &path);
908 if (!found) {
909 fuse_remounter_->fence()->Leave();
910 fuse_reply_err(req, ENOENT);
911 return;
912 }
913 found = GetDirentForInode(ino, &d);
914
915 if (!found) {
916 fuse_remounter_->fence()->Leave();
917 ReplyNegative(d, req);
918 return;
919 }
920 if (!d.IsDirectory()) {
921 fuse_remounter_->fence()->Leave();
922 fuse_reply_err(req, ENOTDIR);
923 return;
924 }
925
926 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_opendir on inode: %" PRIu64 ", path %s",
927 uint64_t(ino), path.c_str());
928
929 // Build listing
930 BigVector<char> fuse_listing(512);
931
932 // Add current directory link
933 struct stat info;
934 info = d.GetStatStructure();
935 AddToDirListing(req, ".", &info, &fuse_listing);
936
937 // Add parent directory link
938 catalog::DirectoryEntry p;
939 if (d.inode() != catalog_mgr->GetRootInode()
940 && (GetDirentForPath(GetParentPath(path), &p) > 0)) {
941 info = p.GetStatStructure();
942 AddToDirListing(req, "..", &info, &fuse_listing);
943 }
944
945 // Add all names
946 catalog::StatEntryList listing_from_catalog;
947 const bool retval = catalog_mgr->ListingStat(path, &listing_from_catalog);
948
949 if (!retval) {
950 fuse_remounter_->fence()->Leave();
951 fuse_listing.Clear(); // Buffer is shared, empty manually
952
953 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
954 "EIO (03): failed to open directory at %s", path.c_str());
955 perf::Inc(file_system_->n_eio_total());
956 perf::Inc(file_system_->n_eio_03());
957 fuse_reply_err(req, EIO);
958 return;
959 }
960 for (unsigned i = 0; i < listing_from_catalog.size(); ++i) {
961 // Fix inodes
962 PathString entry_path;
963 entry_path.Assign(path);
964 entry_path.Append("/", 1);
965 entry_path.Append(listing_from_catalog.AtPtr(i)->name.GetChars(),
966 listing_from_catalog.AtPtr(i)->name.GetLength());
967
968 catalog::DirectoryEntry entry_dirent;
969 if (!GetDirentForPath(entry_path, &entry_dirent)) {
970 LogCvmfs(kLogCvmfs, kLogDebug, "listing entry %s vanished, skipping",
971 entry_path.c_str());
972 continue;
973 }
974
975 struct stat fixed_info = listing_from_catalog.AtPtr(i)->info;
976 fixed_info.st_ino = entry_dirent.inode();
977 AddToDirListing(req, listing_from_catalog.AtPtr(i)->name.c_str(),
978 &fixed_info, &fuse_listing);
979 }
980 fuse_remounter_->fence()->Leave();
981
982 DirectoryListing stream_listing;
983 stream_listing.size = fuse_listing.size();
984 stream_listing.capacity = fuse_listing.capacity();
985 bool large_alloc;
986 fuse_listing.ShareBuffer(&stream_listing.buffer, &large_alloc);
987 if (large_alloc)
988 stream_listing.capacity = 0;
989
990 // Save the directory listing and return a handle to the listing
991 {
992 const MutexLockGuard m(&lock_directory_handles_);
993 LogCvmfs(kLogCvmfs, kLogDebug,
994 "linking directory handle %lu to dir inode: %" PRIu64,
995 next_directory_handle_, uint64_t(ino));
996 (*directory_handles_)[next_directory_handle_] = stream_listing;
997 fi->fh = next_directory_handle_;
998 ++next_directory_handle_;
999 }
1000 perf::Inc(file_system_->n_fs_dir_open());
1001 perf::Inc(file_system_->no_open_dirs());
1002
1003 #if (FUSE_VERSION >= 30)
1004 #ifdef CVMFS_ENABLE_FUSE3_CACHE_READDIR
1005 // This affects only reads on the same open directory handle (e.g. multiple
1006 // reads with rewinddir() between them). A new opendir on the same directory
1007 // will trigger readdir calls independently of this setting.
1008 fi->cache_readdir = 1;
1009 #endif
1010 #endif
1011 fuse_reply_open(req, fi);
1012 }
1013
1014
1015 /**
1016 * Release a directory.
1017 */
1018 static void cvmfs_releasedir(fuse_req_t req, fuse_ino_t ino,
1019 struct fuse_file_info *fi) {
1020 const HighPrecisionTimer guard_timer(file_system_->hist_fs_releasedir());
1021
1022 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1023 LogCvmfs(kLogCvmfs, kLogDebug,
1024 "cvmfs_releasedir on inode %" PRIu64 ", handle %lu", uint64_t(ino),
1025 fi->fh);
1026
1027 int reply = 0;
1028
1029 {
1030 const MutexLockGuard m(&lock_directory_handles_);
1031 const DirectoryHandles::iterator iter_handle = directory_handles_->find(
1032 fi->fh);
1033 if (iter_handle != directory_handles_->end()) {
1034 if (iter_handle->second.capacity == 0)
1035 smunmap(iter_handle->second.buffer);
1036 else
1037 free(iter_handle->second.buffer);
1038 directory_handles_->erase(iter_handle);
1039 perf::Dec(file_system_->no_open_dirs());
1040 } else {
1041 reply = EINVAL;
1042 }
1043 }
1044
1045 fuse_reply_err(req, reply);
1046 }
1047
1048
1049 /**
1050 * Very large directory listings have to be sent in slices.
1051 */
1052 static void ReplyBufferSlice(const fuse_req_t req, const char *buffer,
1053 const size_t buffer_size, const off_t offset,
1054 const size_t max_size) {
1055 if (offset < static_cast<int>(buffer_size)) {
1056 fuse_reply_buf(
1057 req, buffer + offset,
1058 std::min(static_cast<size_t>(buffer_size - offset), max_size));
1059 } else {
1060 fuse_reply_buf(req, NULL, 0);
1061 }
1062 }
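
   The slicing contract matches how the kernel iterates a directory stream: each
   readdir call passes the offset reached so far and gets back at most max_size
   bytes; an empty reply ends the iteration. Worked example (illustrative
   numbers) for a 71680-byte listing read in 65536-byte requests:

     // 1st call: off = 0      -> reply bytes [0, 65536)
     // 2nd call: off = 65536  -> reply bytes [65536, 71680)
     // 3rd call: off = 71680  -> off >= buffer_size, empty reply ends readdir
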
1063
1064
1065 /**
1066 * Read the directory listing.
1067 */
1068 static void cvmfs_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
1069 off_t off, struct fuse_file_info *fi) {
1070 const HighPrecisionTimer guard_timer(file_system_->hist_fs_readdir());
1071
1072 LogCvmfs(kLogCvmfs, kLogDebug,
1073 "cvmfs_readdir on inode %" PRIu64
1074 " reading %lu bytes from offset %ld",
1075 static_cast<uint64_t>(mount_point_->catalog_mgr()->MangleInode(ino)),
1076 size, off);
1077
1078 DirectoryListing listing;
1079
1080 const MutexLockGuard m(&lock_directory_handles_);
1081 const DirectoryHandles::const_iterator iter_handle = directory_handles_->find(
1082 fi->fh);
1083 if (iter_handle != directory_handles_->end()) {
1084 listing = iter_handle->second;
1085
1086 ReplyBufferSlice(req, listing.buffer, listing.size, off, size);
1087 return;
1088 }
1089
1090 fuse_reply_err(req, EINVAL);
1091 }
1092
1093 static void FillOpenFlags(const glue::PageCacheTracker::OpenDirectives od,
1094 struct fuse_file_info *fi) {
1095 assert(!TestBit(glue::PageCacheTracker::kBitDirectIo, fi->fh));
1096 fi->keep_cache = od.keep_cache;
1097 fi->direct_io = od.direct_io;
1098 if (fi->direct_io)
1099 SetBit(glue::PageCacheTracker::kBitDirectIo, &fi->fh);
1100 }
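
   The direct-io decision is remembered by setting a spare bit directly in the
   64-bit fuse file handle, so later read()/release() calls can recover it
   without any lookup. A minimal sketch of that encoding (hypothetical helper
   names; the real bit position is glue::PageCacheTracker::kBitDirectIo):

     const unsigned kBit = 62;  // assumed bit position, illustration only

     uint64_t MarkDirectIo(uint64_t fh)  { return fh | (uint64_t(1) << kBit); }
     bool IsDirectIo(uint64_t fh)        { return (fh >> kBit) & 1; }
     uint64_t StripDirectIo(uint64_t fh) { return fh & ~(uint64_t(1) << kBit); }
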
1101
1102
1103 #ifdef __APPLE__
1104 // On macOS, xattr on a symlink opens and closes the file (with O_SYMLINK)
1105 // around the actual getxattr call. In order to not run into an I/O error
1106 // we use a special file handle for symlinks, from which one cannot read.
1107 static const uint64_t kFileHandleIgnore = static_cast<uint64_t>(2) << 60;
1108 #endif
1109
1110 /**
1111 * Open a file from cache. If necessary, file is downloaded first.
1112 *
1113 * \return Read-only file descriptor in fi->fh or kChunkedFileHandle for
1114 * chunked files
1115 */
1116 static void cvmfs_open(fuse_req_t req, fuse_ino_t ino,
1117 struct fuse_file_info *fi) {
1118 const HighPrecisionTimer guard_timer(file_system_->hist_fs_open());
1119
1120 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1121 FuseInterruptCue ic(&req);
1122 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
1123 &ic);
1124 fuse_remounter_->fence()->Enter();
1125 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1126 ino = catalog_mgr->MangleInode(ino);
1127 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_open on inode: %" PRIu64,
1128 uint64_t(ino));
1129
1130 int fd = -1;
1131 catalog::DirectoryEntry dirent;
1132 PathString path;
1133
1134 bool found = GetPathForInode(ino, &path);
1135 if (!found) {
1136 fuse_remounter_->fence()->Leave();
1137 fuse_reply_err(req, ENOENT);
1138 return;
1139 }
1140 found = GetDirentForInode(ino, &dirent);
1141 if (!found) {
1142 fuse_remounter_->fence()->Leave();
1143 ReplyNegative(dirent, req);
1144 return;
1145 }
1146
1147 if (!CheckVoms(*fuse_ctx)) {
1148 fuse_remounter_->fence()->Leave();
1149 fuse_reply_err(req, EACCES);
1150 return;
1151 }
1152
1153 mount_point_->tracer()->Trace(Tracer::kEventOpen, path, "open()");
1154 // Don't check. Either done by the OS or one wants to purposefully work
1155 // around wrong open flags
1156 // if ((fi->flags & 3) != O_RDONLY) {
1157 // fuse_reply_err(req, EROFS);
1158 // return;
1159 // }
1160 #ifdef __APPLE__
1161 if ((fi->flags & O_SHLOCK) || (fi->flags & O_EXLOCK)) {
1162 fuse_remounter_->fence()->Leave();
1163 fuse_reply_err(req, EOPNOTSUPP);
1164 return;
1165 }
1166 if (fi->flags & O_SYMLINK) {
1167 fuse_remounter_->fence()->Leave();
1168 fi->fh = kFileHandleIgnore;
1169 fuse_reply_open(req, fi);
1170 return;
1171 }
1172 #endif
1173 if (fi->flags & O_EXCL) {
1174 fuse_remounter_->fence()->Leave();
1175 fuse_reply_err(req, EEXIST);
1176 return;
1177 }
1178
1179 perf::Inc(file_system_->n_fs_open()); // Count actual open / fetch operations
1180
1181 glue::PageCacheTracker::OpenDirectives open_directives;
1182 if (!dirent.IsChunkedFile()) {
1183 if (dirent.IsDirectIo()) {
1184 open_directives = mount_point_->page_cache_tracker()->OpenDirect();
1185 } else {
1186 open_directives = mount_point_->page_cache_tracker()->Open(
1187 ino, dirent.checksum(), dirent.GetStatStructure());
1188 }
1189 fuse_remounter_->fence()->Leave();
1190 } else {
1191 LogCvmfs(kLogCvmfs, kLogDebug,
1192 "chunked file %s opened (download delayed to read() call)",
1193 path.c_str());
1194
1195 if (!IncAndCheckNoOpenFiles()) {
1196 perf::Dec(file_system_->no_open_files());
1197 fuse_remounter_->fence()->Leave();
1198 LogCvmfs(kLogCvmfs, kLogSyslogErr, "open file descriptor limit exceeded");
1199 fuse_reply_err(req, EMFILE);
1200 perf::Inc(file_system_->n_emfile());
1201 return;
1202 }
1203
1204 // Figure out unique inode from annotated catalog
1205 // TODO(jblomer): we only need to look up if the inode is not from the
1206 // current generation
1207 catalog::DirectoryEntry dirent_origin;
1208 if (!catalog_mgr->LookupPath(path, catalog::kLookupDefault,
1209 &dirent_origin)) {
1210 fuse_remounter_->fence()->Leave();
1211 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1212 "chunked file %s vanished unexpectedly", path.c_str());
1213 fuse_reply_err(req, ENOENT);
1214 return;
1215 }
1216 const uint64_t unique_inode = dirent_origin.inode();
1217
1218 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1219 chunk_tables->Lock();
1220 if (!chunk_tables->inode2chunks.Contains(unique_inode)) {
1221 chunk_tables->Unlock();
1222
1223 // Retrieve File chunks from the catalog
1224 UniquePtr<FileChunkList> chunks(new FileChunkList());
1225 if (!catalog_mgr->ListFileChunks(path, dirent.hash_algorithm(),
1226 chunks.weak_ref())
1227 || chunks->IsEmpty()) {
1228 fuse_remounter_->fence()->Leave();
1229 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1230 "EIO (04): failed to open file %s. "
1231 "It is marked as 'chunked', but no chunks found.",
1232 path.c_str());
1233 perf::Inc(file_system_->n_eio_total());
1234 perf::Inc(file_system_->n_eio_04());
1235 fuse_reply_err(req, EIO);
1236 return;
1237 }
1238 fuse_remounter_->fence()->Leave();
1239
1240 chunk_tables->Lock();
1241 // Check again to avoid race
1242 if (!chunk_tables->inode2chunks.Contains(unique_inode)) {
1243 chunk_tables->inode2chunks.Insert(
1244 unique_inode, FileChunkReflist(chunks.Release(), path,
1245 dirent.compression_algorithm(),
1246 dirent.IsExternalFile()));
1247 chunk_tables->inode2references.Insert(unique_inode, 1);
1248 } else {
1249 uint32_t refctr;
1250 const bool retval = chunk_tables->inode2references.Lookup(unique_inode,
1251 &refctr);
1252 assert(retval);
1253 chunk_tables->inode2references.Insert(unique_inode, refctr + 1);
1254 }
1255 } else {
1256 fuse_remounter_->fence()->Leave();
1257 uint32_t refctr;
1258 const bool retval = chunk_tables->inode2references.Lookup(unique_inode,
1259 &refctr);
1260 assert(retval);
1261 chunk_tables->inode2references.Insert(unique_inode, refctr + 1);
1262 }
1263
1264 // Update the chunk handle list
1265 LogCvmfs(kLogCvmfs, kLogDebug,
1266 "linking chunk handle %lu to unique inode: %" PRIu64,
1267 chunk_tables->next_handle, uint64_t(unique_inode));
1268 chunk_tables->handle2fd.Insert(chunk_tables->next_handle, ChunkFd());
1269 chunk_tables->handle2uniqino.Insert(chunk_tables->next_handle,
1270 unique_inode);
1271
1272 // Generate artificial content hash as hash over chunk hashes
1273 // TODO(jblomer): we may want to cache the result in the chunk tables
1274 FileChunkReflist chunk_reflist;
1275 const bool retval = chunk_tables->inode2chunks.Lookup(unique_inode,
1276 &chunk_reflist);
1277 assert(retval);
1278
1279 fi->fh = chunk_tables->next_handle;
1280 if (dirent.IsDirectIo()) {
1281 open_directives = mount_point_->page_cache_tracker()->OpenDirect();
1282 } else {
1283 open_directives = mount_point_->page_cache_tracker()->Open(
1284 ino, chunk_reflist.HashChunkList(), dirent.GetStatStructure());
1285 }
1286 FillOpenFlags(open_directives, fi);
1287 fi->fh = static_cast<uint64_t>(-static_cast<int64_t>(fi->fh));
1288 ++chunk_tables->next_handle;
1289 chunk_tables->Unlock();
1290
1291 fuse_reply_open(req, fi);
1292 return;
1293 }
1294
1295 Fetcher *this_fetcher = dirent.IsExternalFile()
1296 ? mount_point_->external_fetcher()
1297 : mount_point_->fetcher();
1298 CacheManager::Label label;
1299 label.path = path.ToString();
1300 label.size = dirent.size();
1301 label.zip_algorithm = dirent.compression_algorithm();
1302 if (mount_point_->catalog_mgr()->volatile_flag())
1303 label.flags |= CacheManager::kLabelVolatile;
1304 if (dirent.IsExternalFile())
1305 label.flags |= CacheManager::kLabelExternal;
1306 fd = this_fetcher->Fetch(
1307 CacheManager::LabeledObject(dirent.checksum(), label));
1308
1309 if (fd >= 0) {
1310 if (IncAndCheckNoOpenFiles()) {
1311 LogCvmfs(kLogCvmfs, kLogDebug, "file %s opened (fd %d)", path.c_str(),
1312 fd);
1313 fi->fh = fd;
1314 FillOpenFlags(open_directives, fi);
1315 fuse_reply_open(req, fi);
1316 return;
1317 } else {
1318 if (file_system_->cache_mgr()->Close(fd) == 0)
1319 perf::Dec(file_system_->no_open_files());
1320 LogCvmfs(kLogCvmfs, kLogSyslogErr, "open file descriptor limit exceeded");
1321 // not returning an fd, so close the page cache tracker entry if required
1322 if (!dirent.IsDirectIo()) {
1323 fuse_remounter_->fence()->Enter();
1324 mount_point_->page_cache_tracker()->Close(ino);
1325 fuse_remounter_->fence()->Leave();
1326 }
1327 fuse_reply_err(req, EMFILE);
1328 perf::Inc(file_system_->n_emfile());
1329 return;
1330 }
1331 assert(false);
1332 }
1333
1334 // fd < 0
1335 // the download has failed. Close the page cache tracker entry if required
1336 if (!dirent.IsDirectIo()) {
1337 fuse_remounter_->fence()->Enter();
1338 mount_point_->page_cache_tracker()->Close(ino);
1339 fuse_remounter_->fence()->Leave();
1340 }
1341
1342 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1343 "failed to open inode: %" PRIu64 ", CAS key %s, error code %d",
1344 uint64_t(ino), dirent.checksum().ToString().c_str(), errno);
1345 if (errno == EMFILE) {
1346 LogCvmfs(kLogCvmfs, kLogSyslogErr, "open file descriptor limit exceeded");
1347 fuse_reply_err(req, EMFILE);
1348 perf::Inc(file_system_->n_emfile());
1349 return;
1350 }
1351
1352 mount_point_->backoff_throttle()->Throttle();
1353
1354 mount_point_->file_system()->io_error_info()->AddIoError();
1355 if (EIO == errno || EIO == -fd) {
1356 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1357 "EIO (06): Failed to open file %s", path.c_str());
1358 perf::Inc(file_system_->n_eio_total());
1359 perf::Inc(file_system_->n_eio_06());
1360 }
1361
1362 fuse_reply_err(req, -fd);
1363 }
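
   For chunked files, cvmfs_open() stores the chunk-table handle negated in
   fi->fh; cvmfs_read() and cvmfs_release() then tell the two cases apart by
   sign. A compact sketch of this encoding (hypothetical helpers, ignoring the
   direct-io bit discussed above):

     uint64_t EncodeChunkHandle(uint64_t handle) {
       return static_cast<uint64_t>(-static_cast<int64_t>(handle));
     }
     // Returns true for a chunk handle, false for a plain cache file descriptor
     bool DecodeHandle(uint64_t fh, int64_t *fd, uint64_t *chunk_handle) {
       const int64_t signed_fh = static_cast<int64_t>(fh);
       if (signed_fh >= 0) { *fd = signed_fh; return false; }
       *chunk_handle = static_cast<uint64_t>(-signed_fh);
       return true;
     }
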
1364
1365
1366 /**
1367 * Redirected to pread into cache.
1368 */
1369 static void cvmfs_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1370 struct fuse_file_info *fi) {
1371 const HighPrecisionTimer guard_timer(file_system_->hist_fs_read());
1372
1373 LogCvmfs(kLogCvmfs, kLogDebug,
1374 "cvmfs_read inode: %" PRIu64 " reading %lu bytes from offset %ld "
1375 "fd %lu",
1376 uint64_t(mount_point_->catalog_mgr()->MangleInode(ino)), size, off,
1377 fi->fh);
1378 perf::Inc(file_system_->n_fs_read());
1379
1380 #ifdef __APPLE__
1381 if (fi->fh == kFileHandleIgnore) {
1382 fuse_reply_err(req, EBADF);
1383 return;
1384 }
1385 #endif
1386
1387 // Get data chunk (<=128k guaranteed by Fuse)
1388 char *data = static_cast<char *>(alloca(size));
1389 unsigned int overall_bytes_fetched = 0;
1390
1391 const int64_t fd = static_cast<int64_t>(fi->fh);
1392 uint64_t abs_fd = (fd < 0) ? -fd : fd;
1393 ClearBit(glue::PageCacheTracker::kBitDirectIo, &abs_fd);
1394
1395 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1396 FuseInterruptCue ic(&req);
1397 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
1398 &ic);
1399
1400 // Do we have a chunked file?
1401 if (fd < 0) {
1402 const uint64_t chunk_handle = abs_fd;
1403 uint64_t unique_inode;
1404 ChunkFd chunk_fd;
1405 FileChunkReflist chunks;
1406 bool retval;
1407
1408 // Fetch unique inode, chunk list and file descriptor
1409 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1410 chunk_tables->Lock();
1411 retval = chunk_tables->handle2uniqino.Lookup(chunk_handle, &unique_inode);
1412 if (!retval) {
1413 LogCvmfs(kLogCvmfs, kLogDebug, "no unique inode, fall back to fuse ino");
1414 unique_inode = ino;
1415 }
1416 retval = chunk_tables->inode2chunks.Lookup(unique_inode, &chunks);
1417 assert(retval);
1418 chunk_tables->Unlock();
1419
1420 unsigned chunk_idx = chunks.FindChunkIdx(off);
1421
1422 // Lock chunk handle
1423 pthread_mutex_t *handle_lock = chunk_tables->Handle2Lock(chunk_handle);
1424 const MutexLockGuard m(handle_lock);
1425 chunk_tables->Lock();
1426 retval = chunk_tables->handle2fd.Lookup(chunk_handle, &chunk_fd);
1427 assert(retval);
1428 chunk_tables->Unlock();
1429
1430 // Fetch all needed chunks and read the requested data
1431 off_t offset_in_chunk = off - chunks.list->AtPtr(chunk_idx)->offset();
1432 do {
1433 // Open file descriptor to chunk
1434 if ((chunk_fd.fd == -1) || (chunk_fd.chunk_idx != chunk_idx)) {
1435 if (chunk_fd.fd != -1)
1436 file_system_->cache_mgr()->Close(chunk_fd.fd);
1437 Fetcher *this_fetcher = chunks.external_data
1438 ? mount_point_->external_fetcher()
1439 : mount_point_->fetcher();
1440 CacheManager::Label label;
1441 label.path = chunks.path.ToString();
1442 label.size = chunks.list->AtPtr(chunk_idx)->size();
1443 label.zip_algorithm = chunks.compression_alg;
1444 label.flags |= CacheManager::kLabelChunked;
1445 if (mount_point_->catalog_mgr()->volatile_flag())
1446 label.flags |= CacheManager::kLabelVolatile;
1447 if (chunks.external_data) {
1448 label.flags |= CacheManager::kLabelExternal;
1449 label.range_offset = chunks.list->AtPtr(chunk_idx)->offset();
1450 }
1451 chunk_fd.fd = this_fetcher->Fetch(CacheManager::LabeledObject(
1452 chunks.list->AtPtr(chunk_idx)->content_hash(), label));
1453 if (chunk_fd.fd < 0) {
1454 chunk_fd.fd = -1;
1455 chunk_tables->Lock();
1456 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1457 chunk_tables->Unlock();
1458
1459 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1460 "EIO (05): Failed to fetch chunk %d from file %s", chunk_idx,
1461 chunks.path.ToString().c_str());
1462 perf::Inc(file_system_->n_eio_total());
1463 perf::Inc(file_system_->n_eio_05());
1464 fuse_reply_err(req, EIO);
1465 return;
1466 }
1467 chunk_fd.chunk_idx = chunk_idx;
1468 }
1469
1470 LogCvmfs(kLogCvmfs, kLogDebug, "reading from chunk fd %d", chunk_fd.fd);
1471 // Read data from chunk
1472 const size_t bytes_to_read = size - overall_bytes_fetched;
1473 const size_t remaining_bytes_in_chunk = chunks.list->AtPtr(chunk_idx)
1474 ->size()
1475 - offset_in_chunk;
1476 const size_t bytes_to_read_in_chunk = std::min(bytes_to_read,
1477 remaining_bytes_in_chunk);
1478 const int64_t bytes_fetched = file_system_->cache_mgr()->Pread(
1479 chunk_fd.fd,
1480 data + overall_bytes_fetched,
1481 bytes_to_read_in_chunk,
1482 offset_in_chunk);
1483
1484 if (bytes_fetched < 0) {
1485 LogCvmfs(kLogCvmfs, kLogSyslogErr, "read err no %" PRId64 " (%s)",
1486 bytes_fetched, chunks.path.ToString().c_str());
1487 chunk_tables->Lock();
1488 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1489 chunk_tables->Unlock();
1490 if (EIO == errno || EIO == -bytes_fetched) {
1491 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1492 "EIO (07): Failed to read chunk %d from file %s", chunk_idx,
1493 chunks.path.ToString().c_str());
1494 perf::Inc(file_system_->n_eio_total());
1495 perf::Inc(file_system_->n_eio_07());
1496 }
1497 fuse_reply_err(req, -bytes_fetched);
1498 return;
1499 }
1500 overall_bytes_fetched += bytes_fetched;
1501
1502 // Proceed to the next chunk to keep on reading data
1503 ++chunk_idx;
1504 offset_in_chunk = 0;
1505 } while ((overall_bytes_fetched < size)
1506 && (chunk_idx < chunks.list->size()));
1507
1508 // Update chunk file descriptor
1509 chunk_tables->Lock();
1510 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1511 chunk_tables->Unlock();
1512 LogCvmfs(kLogCvmfs, kLogDebug, "released chunk file descriptor %d",
1513 chunk_fd.fd);
1514 } else {
1515 const int64_t nbytes = file_system_->cache_mgr()->Pread(abs_fd, data, size,
1516 off);
1517 if (nbytes < 0) {
1518 if (EIO == errno || EIO == -nbytes) {
1519 PathString path;
1520 const bool found = GetPathForInode(ino, &path);
1521 if (found) {
1522 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1523 "EIO (08): Failed to read file %s", path.ToString().c_str());
1524 } else {
1525 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1526 "EIO (08): Failed to read from %s - <unknown inode>",
1527 path.ToString().c_str());
1528 }
1529 perf::Inc(file_system_->n_eio_total());
1530 perf::Inc(file_system_->n_eio_08());
1531 }
1532 fuse_reply_err(req, -nbytes);
1533 return;
1534 }
1535 overall_bytes_fetched = nbytes;
1536 }
1537
1538 // Push it to user
1539 fuse_reply_buf(req, data, overall_bytes_fetched);
1540 LogCvmfs(kLogCvmfs, kLogDebug, "pushed %d bytes to user",
1541 overall_bytes_fetched);
1542 }
1543
1544
1545 /**
1546 * File close operation, redirected into cache.
1547 */
1548 static void cvmfs_release(fuse_req_t req, fuse_ino_t ino,
1549 struct fuse_file_info *fi) {
1550 const HighPrecisionTimer guard_timer(file_system_->hist_fs_release());
1551
1552 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1553 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_release on inode: %" PRIu64,
1554 uint64_t(ino));
1555
1556 #ifdef __APPLE__
1557 if (fi->fh == kFileHandleIgnore) {
1558 fuse_reply_err(req, 0);
1559 return;
1560 }
1561 #endif
1562
1563 const int64_t fd = static_cast<int64_t>(fi->fh);
1564 uint64_t abs_fd = (fd < 0) ? -fd : fd;
1565 if (!TestBit(glue::PageCacheTracker::kBitDirectIo, abs_fd)) {
1566 mount_point_->page_cache_tracker()->Close(ino);
1567 }
1568 ClearBit(glue::PageCacheTracker::kBitDirectIo, &abs_fd);
1569
1570 // do we have a chunked file?
1571 if (fd < 0) {
1572 const uint64_t chunk_handle = abs_fd;
1573 LogCvmfs(kLogCvmfs, kLogDebug, "releasing chunk handle %" PRIu64,
1574 chunk_handle);
1575 uint64_t unique_inode;
1576 ChunkFd chunk_fd;
1577 const FileChunkReflist chunks;
1578 uint32_t refctr;
1579 bool retval;
1580
1581 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1582 chunk_tables->Lock();
1583 retval = chunk_tables->handle2uniqino.Lookup(chunk_handle, &unique_inode);
1584 if (!retval) {
1585 LogCvmfs(kLogCvmfs, kLogDebug, "no unique inode, fall back to fuse ino");
1586 unique_inode = ino;
1587 } else {
1588 chunk_tables->handle2uniqino.Erase(chunk_handle);
1589 }
1590 retval = chunk_tables->handle2fd.Lookup(chunk_handle, &chunk_fd);
1591 assert(retval);
1592 chunk_tables->handle2fd.Erase(chunk_handle);
1593
1594 retval = chunk_tables->inode2references.Lookup(unique_inode, &refctr);
1595 assert(retval);
1596 refctr--;
1597 if (refctr == 0) {
1598 LogCvmfs(kLogCvmfs, kLogDebug, "releasing chunk list for inode %" PRIu64,
1599 uint64_t(unique_inode));
1600 FileChunkReflist to_delete;
1601 retval = chunk_tables->inode2chunks.Lookup(unique_inode, &to_delete);
1602 assert(retval);
1603 chunk_tables->inode2references.Erase(unique_inode);
1604 chunk_tables->inode2chunks.Erase(unique_inode);
1605 delete to_delete.list;
1606 } else {
1607 chunk_tables->inode2references.Insert(unique_inode, refctr);
1608 }
1609 chunk_tables->Unlock();
1610
1611 if (chunk_fd.fd != -1)
1612 file_system_->cache_mgr()->Close(chunk_fd.fd);
1613 perf::Dec(file_system_->no_open_files());
1614 } else {
1615 if (file_system_->cache_mgr()->Close(abs_fd) == 0) {
1616 perf::Dec(file_system_->no_open_files());
1617 }
1618 }
1619 fuse_reply_err(req, 0);
1620 }
1621
1622 /**
1623 * Returns information about a mounted filesystem. In this case it returns
1624 * information about the local cache occupancy of cvmfs.
1625 *
1626 * Note: If the elements of the struct statvfs *info are set to 0, they will
1627 * be ignored by the command-line tool "df".
1628 */
1629 static void cvmfs_statfs(fuse_req_t req, fuse_ino_t ino) {
1630 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1631 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_statfs on inode: %" PRIu64,
1632 uint64_t(ino));
1633
1634 TraceInode(Tracer::kEventStatFs, ino, "statfs()");
1635
1636 perf::Inc(file_system_->n_fs_statfs());
1637
1638 // Unmanaged cache (no lock needed - statfs is never modified)
1639 if (!file_system_->cache_mgr()->quota_mgr()->HasCapability(
1640 QuotaManager::kCapIntrospectSize)) {
1641 LogCvmfs(kLogCvmfs, kLogDebug, "QuotaManager does not support statfs");
1642 fuse_reply_statfs(req, (mount_point_->statfs_cache()->info()));
1643 return;
1644 }
1645
1646 const MutexLockGuard m(mount_point_->statfs_cache()->lock());
1647
1648 const uint64_t deadline = *mount_point_->statfs_cache()->expiry_deadline();
1649 struct statvfs *info = mount_point_->statfs_cache()->info();
1650
1651 // cached version still valid
1652 if (platform_monotonic_time() < deadline) {
1653 perf::Inc(file_system_->n_fs_statfs_cached());
1654 fuse_reply_statfs(req, info);
1655 return;
1656 }
1657
1658 uint64_t available = 0;
1659 const uint64_t size = file_system_->cache_mgr()->quota_mgr()->GetSize();
1660 const uint64_t
1661 capacity = file_system_->cache_mgr()->quota_mgr()->GetCapacity();
1662 // Fuse/OS X doesn't like values < 512
1663 info->f_bsize = info->f_frsize = 512;
1664
1665 if (capacity == (uint64_t)(-1)) {
1666 // Unknown capacity, set capacity = size
1667 info->f_blocks = size / info->f_bsize;
1668 } else {
1669 // Take values from LRU module
1670 info->f_blocks = capacity / info->f_bsize;
1671 available = capacity - size;
1672 }
1673
1674 info->f_bfree = info->f_bavail = available / info->f_bsize;
1675
1676 // Inodes / entries
1677 fuse_remounter_->fence()->Enter();
1678 const uint64_t all_inodes = mount_point_->catalog_mgr()->all_inodes();
1679 const uint64_t loaded_inode = mount_point_->catalog_mgr()->loaded_inodes();
1680 info->f_files = all_inodes;
1681 info->f_ffree = info->f_favail = all_inodes - loaded_inode;
1682 fuse_remounter_->fence()->Leave();
1683
1684 *mount_point_->statfs_cache()
1685 ->expiry_deadline() = platform_monotonic_time()
1686 + mount_point_->statfs_cache()->cache_timeout();
1687
1688 fuse_reply_statfs(req, info);
1689 }
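// [Editorial sketch, not part of cvmfs.cc] For illustration: how a consumer
// such as "df" would turn the statvfs fields filled in above back into byte
// and inode figures. Uses only the POSIX struct and headers already included
// in this file; the helper name is hypothetical and the function is unused.
static void __attribute__((unused)) PrintCacheOccupancySketch(
    const struct statvfs &info) {
  const uint64_t capacity_bytes = uint64_t(info.f_blocks) * info.f_frsize;
  const uint64_t free_bytes = uint64_t(info.f_bfree) * info.f_frsize;
  const uint64_t used_bytes = capacity_bytes - free_bytes;
  printf("cache: %" PRIu64 "/%" PRIu64 " bytes used, "
         "%" PRIu64 " of %" PRIu64 " known inodes loaded\n",
         used_bytes, capacity_bytes,
         uint64_t(info.f_files - info.f_ffree), uint64_t(info.f_files));
}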
1690
1691 #ifdef __APPLE__
1692 static void cvmfs_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
1693 size_t size, uint32_t position)
1694 #else
1695 static void cvmfs_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
1696 size_t size)
1697 #endif
1698 {
1699 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1700 FuseInterruptCue ic(&req);
1701 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
1702 &ic);
1703
1704 fuse_remounter_->fence()->Enter();
1705 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1706 ino = catalog_mgr->MangleInode(ino);
1707 LogCvmfs(kLogCvmfs, kLogDebug,
1708 "cvmfs_getxattr on inode: %" PRIu64 " for xattr: %s", uint64_t(ino),
1709 name);
1710 if (!CheckVoms(*fuse_ctx)) {
1711 fuse_remounter_->fence()->Leave();
1712 fuse_reply_err(req, EACCES);
1713 return;
1714 }
1715 TraceInode(Tracer::kEventGetXAttr, ino, "getxattr()");
1716
1717 vector<string> tokens_mode_machine = SplitString(name, '~');
1718 vector<string> tokens_mode_human = SplitString(name, '@');
1719
1720 int32_t attr_req_page = 0;
1721 MagicXattrMode xattr_mode = kXattrMachineMode;
1722 string attr;
1723
1724 bool attr_req_is_valid = false;
1725 const sanitizer::PositiveIntegerSanitizer page_num_sanitizer;
1726
1727 if (tokens_mode_human.size() > 1) {
1728 const std::string token = tokens_mode_human[tokens_mode_human.size() - 1];
1729 if (token == "?") {
1730 attr_req_is_valid = true;
1731 attr_req_page = -1;
1732 } else {
1733 if (page_num_sanitizer.IsValid(token)) {
1734 attr_req_is_valid = true;
1735 attr_req_page = static_cast<int32_t>(String2Uint64(token));
1736 }
1737 }
1738 xattr_mode = kXattrHumanMode;
1739 attr = tokens_mode_human[0];
1740 } else if (tokens_mode_machine.size() > 1) {
1741 const std::string
1742 token = tokens_mode_machine[tokens_mode_machine.size() - 1];
1743 if (token == "?") {
1744 attr_req_is_valid = true;
1745 attr_req_page = -1;
1746 } else {
1747 if (page_num_sanitizer.IsValid(token)) {
1748 attr_req_is_valid = true;
1749 attr_req_page = static_cast<int32_t>(String2Uint64(token));
1750 }
1751 }
1752 xattr_mode = kXattrMachineMode;
1753 attr = tokens_mode_machine[0];
1754
1755 } else {
1756 attr_req_is_valid = true;
1757 attr = tokens_mode_machine[0];
1758 }
1759
1760 if (!attr_req_is_valid) {
1761 fuse_remounter_->fence()->Leave();
1762 fuse_reply_err(req, ENODATA);
1763 return;
1764 }
1765
1766 catalog::DirectoryEntry d;
1767 const bool found = GetDirentForInode(ino, &d);
1768
1769 if (!found) {
1770 fuse_remounter_->fence()->Leave();
1771 ReplyNegative(d, req);
1772 return;
1773 }
1774
1775 bool retval;
1776 XattrList xattrs;
1777 PathString path;
1778 retval = GetPathForInode(ino, &path);
1779
1780 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1781 "cvmfs_statfs: Race condition? "
1782 "GetPathForInode did not succeed for path %s "
1783 "(path might have not been set)",
1784 path.c_str())) {
1785 fuse_remounter_->fence()->Leave();
1786 fuse_reply_err(req, ESTALE);
1787 return;
1788 }
1789
1790 if (d.IsLink()) {
1791 const catalog::LookupOptions
1792 lookup_options = static_cast<catalog::LookupOptions>(
1793 catalog::kLookupDefault | catalog::kLookupRawSymlink);
1794 catalog::DirectoryEntry raw_symlink;
1795 retval = catalog_mgr->LookupPath(path, lookup_options, &raw_symlink);
1796
1797 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1798 "cvmfs_statfs: Race condition? "
1799 "LookupPath did not succeed for path %s",
1800 path.c_str())) {
1801 fuse_remounter_->fence()->Leave();
1802 fuse_reply_err(req, ESTALE);
1803 return;
1804 }
1805
1806 d.set_symlink(raw_symlink.symlink());
1807 }
1808 if (d.HasXattrs()) {
1809 retval = catalog_mgr->LookupXattrs(path, &xattrs);
1810
1811 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1812 "cvmfs_statfs: Race condition? "
1813 "LookupXattrs did not succeed for path %s",
1814 path.c_str())) {
1815 fuse_remounter_->fence()->Leave();
1816 fuse_reply_err(req, ESTALE);
1817 return;
1818 }
1819 }
1820
1821 bool magic_xattr_success = true;
1822 const MagicXattrRAIIWrapper magic_xattr(
1823 mount_point_->magic_xattr_mgr()->GetLocked(attr, path, &d));
1824 if (!magic_xattr.IsNull()) {
1825 magic_xattr_success = magic_xattr->PrepareValueFencedProtected(
1826 fuse_ctx->gid);
1827 }
1828
1829 fuse_remounter_->fence()->Leave();
1830
1831 if (!magic_xattr_success) {
1832 fuse_reply_err(req, ENOATTR);
1833 return;
1834 }
1835
1836 std::pair<bool, std::string> attribute_result;
1837
1838 if (!magic_xattr.IsNull()) {
1839 attribute_result = magic_xattr->GetValue(attr_req_page, xattr_mode);
1840 } else {
1841 if (!xattrs.Get(attr, &attribute_result.second)) {
1842 fuse_reply_err(req, ENOATTR);
1843 return;
1844 }
1845 attribute_result.first = true;
1846 }
1847
1848 if (!attribute_result.first) {
1849 fuse_reply_err(req, ENODATA);
1850 } else if (size == 0) {
1851 fuse_reply_xattr(req, attribute_result.second.length());
1852 } else if (size >= attribute_result.second.length()) {
1853 fuse_reply_buf(req, &attribute_result.second[0],
1854 attribute_result.second.length());
1855 } else {
1856 fuse_reply_err(req, ERANGE);
1857 }
1858 }
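// [Editorial usage note, not part of cvmfs.cc] The suffix convention parsed
// above lets callers page through long magic attributes: '@<n>' selects page n
// in human-readable mode, '~<n>' selects page n in machine-readable mode, and
// '@?' / '~?' asks for the number of available pages (attr_req_page == -1).
// Illustrative queries (repository path is a placeholder):
//
//   getfattr -n user.expires    /cvmfs/<repo>   # no suffix: machine mode, page 0
//   getfattr -n user.expires@0  /cvmfs/<repo>   # human-readable, page 0
//   getfattr -n user.expires@?  /cvmfs/<repo>   # how many pages exist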
1859
1860
1861 static void cvmfs_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size) {
1862 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1863 FuseInterruptCue ic(&req);
1864 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
1865 &ic);
1866
1867 fuse_remounter_->fence()->Enter();
1868 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1869 ino = catalog_mgr->MangleInode(ino);
1870 TraceInode(Tracer::kEventListAttr, ino, "listxattr()");
1871 LogCvmfs(kLogCvmfs, kLogDebug,
1872 "cvmfs_listxattr on inode: %" PRIu64 ", size %zu [visibility %d]",
1873 uint64_t(ino), size, mount_point_->magic_xattr_mgr()->visibility());
1874
1875 catalog::DirectoryEntry d;
1876 const bool found = GetDirentForInode(ino, &d);
1877 XattrList xattrs;
1878 if (d.HasXattrs()) {
1879 PathString path;
1880 bool retval = GetPathForInode(ino, &path);
1881
1882 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1883 "cvmfs_listxattr: Race condition? "
1884 "GetPathForInode did not succeed for ino %lu",
1885 ino)) {
1886 fuse_remounter_->fence()->Leave();
1887 fuse_reply_err(req, ESTALE);
1888 return;
1889 }
1890
1891 retval = catalog_mgr->LookupXattrs(path, &xattrs);
1892 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1893 "cvmfs_listxattr: Race condition? "
1894 "LookupXattrs did not succeed for ino %lu",
1895 ino)) {
1896 fuse_remounter_->fence()->Leave();
1897 fuse_reply_err(req, ESTALE);
1898 return;
1899 }
1900 }
1901 fuse_remounter_->fence()->Leave();
1902
1903 if (!found) {
1904 ReplyNegative(d, req);
1905 return;
1906 }
1907
1908 string attribute_list;
1909 attribute_list = mount_point_->magic_xattr_mgr()->GetListString(&d);
1910 attribute_list += xattrs.ListKeysPosix(attribute_list);
1911
1912 if (size == 0) {
1913 fuse_reply_xattr(req, attribute_list.length());
1914 } else if (size >= attribute_list.length()) {
1915 if (attribute_list.empty())
1916 fuse_reply_buf(req, NULL, 0);
1917 else
1918 fuse_reply_buf(req, &attribute_list[0], attribute_list.length());
1919 } else {
1920 fuse_reply_err(req, ERANGE);
1921 }
1922 }
1923
1924 bool Evict(const string &path) {
1925 catalog::DirectoryEntry dirent;
1926 fuse_remounter_->fence()->Enter();
1927 const bool found = (GetDirentForPath(PathString(path), &dirent) > 0);
1928
1929 if (!found || !dirent.IsRegular()) {
1930 fuse_remounter_->fence()->Leave();
1931 return false;
1932 }
1933
1934 if (!dirent.IsChunkedFile()) {
1935 fuse_remounter_->fence()->Leave();
1936 } else {
1937 FileChunkList chunks;
1938 mount_point_->catalog_mgr()->ListFileChunks(
1939 PathString(path), dirent.hash_algorithm(), &chunks);
1940 fuse_remounter_->fence()->Leave();
1941 for (unsigned i = 0; i < chunks.size(); ++i) {
1942 file_system_->cache_mgr()->quota_mgr()->Remove(
1943 chunks.AtPtr(i)->content_hash());
1944 }
1945 }
1946 file_system_->cache_mgr()->quota_mgr()->Remove(dirent.checksum());
1947 return true;
1948 }
1949
1950
1951 bool Pin(const string &path) {
1952 catalog::DirectoryEntry dirent;
1953 fuse_remounter_->fence()->Enter();
1954 const bool found = (GetDirentForPath(PathString(path), &dirent) > 0);
1955 if (!found || !dirent.IsRegular()) {
1956 fuse_remounter_->fence()->Leave();
1957 return false;
1958 }
1959
1960 Fetcher *this_fetcher = dirent.IsExternalFile()
1961 ? mount_point_->external_fetcher()
1962 : mount_point_->fetcher();
1963
1964 if (!dirent.IsChunkedFile()) {
1965 fuse_remounter_->fence()->Leave();
1966 } else {
1967 FileChunkList chunks;
1968 mount_point_->catalog_mgr()->ListFileChunks(
1969 PathString(path), dirent.hash_algorithm(), &chunks);
1970 fuse_remounter_->fence()->Leave();
1971 for (unsigned i = 0; i < chunks.size(); ++i) {
1972 const bool retval = file_system_->cache_mgr()->quota_mgr()->Pin(
1973 chunks.AtPtr(i)->content_hash(), chunks.AtPtr(i)->size(),
1974 "Part of " + path, false);
1975 if (!retval)
1976 return false;
1977 int fd = -1;
1978 CacheManager::Label label;
1979 label.path = path;
1980 label.size = chunks.AtPtr(i)->size();
1981 label.zip_algorithm = dirent.compression_algorithm();
1982 label.flags |= CacheManager::kLabelPinned;
1983 label.flags |= CacheManager::kLabelChunked;
1984 if (dirent.IsExternalFile()) {
1985 label.flags |= CacheManager::kLabelExternal;
1986 label.range_offset = chunks.AtPtr(i)->offset();
1987 }
1988 fd = this_fetcher->Fetch(
1989 CacheManager::LabeledObject(chunks.AtPtr(i)->content_hash(), label));
1990 if (fd < 0) {
1991 return false;
1992 }
1993 file_system_->cache_mgr()->Close(fd);
1994 }
1995 return true;
1996 }
1997
1998 const bool retval = file_system_->cache_mgr()->quota_mgr()->Pin(
1999 dirent.checksum(), dirent.size(), path, false);
2000 if (!retval)
2001 return false;
2002 CacheManager::Label label;
2003 label.flags = CacheManager::kLabelPinned;
2004 label.size = dirent.size();
2005 label.path = path;
2006 label.zip_algorithm = dirent.compression_algorithm();
2007 const int fd = this_fetcher->Fetch(
2008 CacheManager::LabeledObject(dirent.checksum(), label));
2009 if (fd < 0) {
2010 return false;
2011 }
2012 file_system_->cache_mgr()->Close(fd);
2013 return true;
2014 }
2015
2016
2017 /**
2018 * Do after-daemon() initialization
2019 */
2020 static void cvmfs_init(void *userdata, struct fuse_conn_info *conn) {
2021 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_init");
2022
2023 // NFS support
2024 #ifdef CVMFS_NFS_SUPPORT
2025 conn->want |= FUSE_CAP_EXPORT_SUPPORT;
2026 #endif
2027
2028 if (mount_point_->enforce_acls()) {
2029 #ifdef FUSE_CAP_POSIX_ACL
2030 if ((conn->capable & FUSE_CAP_POSIX_ACL) == 0) {
2031 PANIC(kLogDebug | kLogSyslogErr,
2032 "FUSE: ACL support requested but missing fuse kernel support, "
2033 "aborting");
2034 }
2035 conn->want |= FUSE_CAP_POSIX_ACL;
2036 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslog, "enforcing ACLs");
2037 #else
2038 PANIC(kLogDebug | kLogSyslogErr,
2039 "FUSE: ACL support requested but not available in this version of "
2040 "libfuse %d, aborting",
2041 FUSE_VERSION);
2042 #endif
2043 }
2044
2045 if (mount_point_->cache_symlinks()) {
2046 #ifdef FUSE_CAP_CACHE_SYMLINKS
2047 if ((conn->capable & FUSE_CAP_CACHE_SYMLINKS) == FUSE_CAP_CACHE_SYMLINKS) {
2048 conn->want |= FUSE_CAP_CACHE_SYMLINKS;
2049 LogCvmfs(kLogCvmfs, kLogDebug, "FUSE: Enable symlink caching");
2050 #ifndef FUSE_CAP_EXPIRE_ONLY
2051 LogCvmfs(
2052 kLogCvmfs, kLogDebug | kLogSyslogWarn,
2053 "FUSE: Symlink caching enabled but no support for fuse_expire_entry. "
2054 "Symlinks will be cached but mountpoints on top of symlinks will "
2055 "break! "
2056 "Current libfuse %d is too old; required: libfuse >= 3.16, "
2057 "kernel >= 6.2-rc1",
2058 FUSE_VERSION);
2059 #endif
2060 } else {
2061 mount_point_->DisableCacheSymlinks();
2062 LogCvmfs(
2063 kLogCvmfs, kLogDebug | kLogSyslogWarn,
2064 "FUSE: Symlink caching requested but missing fuse kernel support, "
2065 "falling back to no caching");
2066 }
2067 #else
2068 mount_point_->DisableCacheSymlinks();
2069 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2070 "FUSE: Symlink caching requested but missing libfuse support, "
2071 "falling back to no caching. Current libfuse %d",
2072 FUSE_VERSION);
2073 #endif
2074 }
2075
2076 #ifdef FUSE_CAP_EXPIRE_ONLY
2077 if ((conn->capable & FUSE_CAP_EXPIRE_ONLY) == FUSE_CAP_EXPIRE_ONLY
2078 && FUSE_VERSION >= FUSE_MAKE_VERSION(3, 16)) {
2079 mount_point_->EnableFuseExpireEntry();
2080 LogCvmfs(kLogCvmfs, kLogDebug, "FUSE: Enable fuse_expire_entry ");
2081 } else if (mount_point_->cache_symlinks()) {
2082 LogCvmfs(
2083 kLogCvmfs, kLogDebug | kLogSyslogWarn,
2084 "FUSE: Symlink caching enabled but no support for fuse_expire_entry. "
2085 "Symlinks will be cached but mountpoints on top of symlinks will "
2086 "break! "
2087 "Current libfuse %d; required: libfuse >= 3.16, kernel >= 6.2-rc1",
2088 FUSE_VERSION);
2089 }
2090 #endif
2091 }
2092
2093 static void cvmfs_destroy(void *unused __attribute__((unused))) {
2094 // The debug log is already closed at this point
2095 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_destroy");
2096 }
2097
2098 /**
2099 * Puts the callback functions in one single structure
2100 */
2101 static void SetCvmfsOperations(struct fuse_lowlevel_ops *cvmfs_operations) {
2102 memset(cvmfs_operations, 0, sizeof(*cvmfs_operations));
2103
2104 // Init/Fini
2105 cvmfs_operations->init = cvmfs_init;
2106 cvmfs_operations->destroy = cvmfs_destroy;
2107
2108 cvmfs_operations->lookup = cvmfs_lookup;
2109 cvmfs_operations->getattr = cvmfs_getattr;
2110 cvmfs_operations->readlink = cvmfs_readlink;
2111 cvmfs_operations->open = cvmfs_open;
2112 cvmfs_operations->read = cvmfs_read;
2113 cvmfs_operations->release = cvmfs_release;
2114 cvmfs_operations->opendir = cvmfs_opendir;
2115 cvmfs_operations->readdir = cvmfs_readdir;
2116 cvmfs_operations->releasedir = cvmfs_releasedir;
2117 cvmfs_operations->statfs = cvmfs_statfs;
2118 cvmfs_operations->getxattr = cvmfs_getxattr;
2119 cvmfs_operations->listxattr = cvmfs_listxattr;
2120 cvmfs_operations->forget = cvmfs_forget;
2121 #if (FUSE_VERSION >= 29)
2122 cvmfs_operations->forget_multi = cvmfs_forget_multi;
2123 #endif
2124 }
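// [Editorial sketch, not part of cvmfs.cc] A fuse_lowlevel_ops table filled as
// above is normally handed to libfuse when a session is created; the generic
// libfuse3 pattern looks roughly as follows (in cvmfs the mounting itself is
// done by the loader, which receives this table via g_cvmfs_exports):
//
//   struct fuse_lowlevel_ops ops;
//   SetCvmfsOperations(&ops);
//   struct fuse_session *session =
//       fuse_session_new(&args, &ops, sizeof(ops), /*userdata=*/NULL);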
2125
2126 // Called by cvmfs_talk when switching into read-only cache mode
2127 void UnregisterQuotaListener() {
2128 if (cvmfs::unpin_listener_) {
2129 quota::UnregisterListener(cvmfs::unpin_listener_);
2130 cvmfs::unpin_listener_ = NULL;
2131 }
2132 if (cvmfs::watchdog_listener_) {
2133 quota::UnregisterListener(cvmfs::watchdog_listener_);
2134 cvmfs::watchdog_listener_ = NULL;
2135 }
2136 }
2137
2138 bool SendFuseFd(const std::string &socket_path) {
2139 int fuse_fd;
2140 #if (FUSE_VERSION >= 30)
2141 fuse_fd = fuse_session_fd(*reinterpret_cast<struct fuse_session **>(
2142 loader_exports_->fuse_channel_or_session));
2143 #else
2144 fuse_fd = fuse_chan_fd(*reinterpret_cast<struct fuse_chan **>(
2145 loader_exports_->fuse_channel_or_session));
2146 #endif
2147 assert(fuse_fd >= 0);
2148 const int sock_fd = ConnectSocket(socket_path);
2149 if (sock_fd < 0) {
2150 LogCvmfs(kLogCvmfs, kLogDebug, "cannot connect to socket %s: %d",
2151 socket_path.c_str(), errno);
2152 return false;
2153 }
2154 const bool retval = SendFd2Socket(sock_fd, fuse_fd);
2155 close(sock_fd);
2156 return retval;
2157 }
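// [Editorial sketch, not part of cvmfs.cc] SendFd2Socket() (util/posix.h) is
// assumed to pass the descriptor with the standard SCM_RIGHTS ancillary-data
// mechanism; a minimal version of that pattern, assuming <sys/socket.h>:
//
//   static bool SendFdSketch(int sock_fd, int payload_fd) {
//     char dummy = 'F';
//     struct iovec iov;
//     iov.iov_base = &dummy;
//     iov.iov_len = 1;
//     char control[CMSG_SPACE(sizeof(int))];
//     memset(control, 0, sizeof(control));
//     struct msghdr msg;
//     memset(&msg, 0, sizeof(msg));
//     msg.msg_iov = &iov;
//     msg.msg_iovlen = 1;
//     msg.msg_control = control;
//     msg.msg_controllen = sizeof(control);
//     struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
//     cmsg->cmsg_level = SOL_SOCKET;
//     cmsg->cmsg_type = SCM_RIGHTS;
//     cmsg->cmsg_len = CMSG_LEN(sizeof(int));
//     memcpy(CMSG_DATA(cmsg), &payload_fd, sizeof(int));
//     return sendmsg(sock_fd, &msg, 0) == 1;  // one payload byte sent
//   }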
2158
2159 } // namespace cvmfs
2160
2161
2162 string *g_boot_error = NULL;
2163
2164 __attribute__((
2165 visibility("default"))) loader::CvmfsExports *g_cvmfs_exports = NULL;
2166
2167
2168 #ifndef __TEST_CVMFS_MOCKFUSE // will be mocked in tests
2169 /**
2170 * Begin section of cvmfs.cc-specific magic extended attributes
2171 */
2172
2173 class ExpiresMagicXattr : public BaseMagicXattr {
2174 time_t catalogs_valid_until_;
2175
2176 virtual bool PrepareValueFenced() {
2177 catalogs_valid_until_ = cvmfs::fuse_remounter_->catalogs_valid_until();
2178 return true;
2179 }
2180
2181 virtual void FinalizeValue() {
2182 if (catalogs_valid_until_ == MountPoint::kIndefiniteDeadline) {
2183 result_pages_.push_back("never (fixed root catalog)");
2184 return;
2185 } else {
2186 const time_t now = time(NULL);
2187 result_pages_.push_back(StringifyInt((catalogs_valid_until_ - now) / 60));
2188 }
2189 }
2190 };
2191
2192 class InodeMaxMagicXattr : public BaseMagicXattr {
2193 virtual void FinalizeValue() {
2194 result_pages_.push_back(StringifyInt(
2195 cvmfs::inode_generation_info_.inode_generation
2196 + xattr_mgr_->mount_point()->catalog_mgr()->inode_gauge()));
2197 }
2198 };
2199
2200 class MaxFdMagicXattr : public BaseMagicXattr {
2201 virtual void FinalizeValue() {
2202 result_pages_.push_back(
2203 StringifyInt(cvmfs::max_open_files_ - cvmfs::kNumReservedFd));
2204 }
2205 };
2206
2207 class PidMagicXattr : public BaseMagicXattr {
2208 virtual void FinalizeValue() {
2209 result_pages_.push_back(StringifyInt(cvmfs::pid_));
2210 }
2211 };
2212
2213 class UptimeMagicXattr : public BaseMagicXattr {
2214 virtual void FinalizeValue() {
2215 const time_t now = time(NULL);
2216 const uint64_t uptime = now - cvmfs::loader_exports_->boot_time;
2217 result_pages_.push_back(StringifyUint(uptime / 60));
2218 }
2219 };
2220
2221 /**
2222 * Register cvmfs.cc-specific magic extended attributes to mountpoint's
2223 * magic extended attribute manager
2224 */
2225 static void RegisterMagicXattrs() {
2226 MagicXattrManager *mgr = cvmfs::mount_point_->magic_xattr_mgr();
2227 mgr->Register("user.expires", new ExpiresMagicXattr());
2228 mgr->Register("user.inode_max", new InodeMaxMagicXattr());
2229 mgr->Register("user.pid", new PidMagicXattr());
2230 mgr->Register("user.maxfd", new MaxFdMagicXattr());
2231 mgr->Register("user.uptime", new UptimeMagicXattr());
2232
2233 mgr->Freeze();
2234 }
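// [Editorial usage note, not part of cvmfs.cc] Once registered and frozen, the
// attributes above can be queried on any path inside the mounted repository,
// for example (repository path is a placeholder):
//
//   getfattr -n user.pid    /cvmfs/<repo>   # pid of the cvmfs client process
//   getfattr -n user.uptime /cvmfs/<repo>   # minutes since the client started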
2235
2236 /**
2237 * Construct a file system but prevent hanging when already mounted. That
2238 * means: at most one "system" mount of any given repository name.
2239 */
2240 static FileSystem *InitSystemFs(const string &mount_path,
2241 const string &fqrn,
2242 FileSystem::FileSystemInfo fs_info) {
2243 fs_info.wait_workspace = false;
2244 FileSystem *file_system = FileSystem::Create(fs_info);
2245
2246 if (file_system->boot_status() == loader::kFailLockWorkspace) {
2247 string fqrn_from_xattr;
2248 const int retval = platform_getxattr(mount_path, "user.fqrn",
2249 &fqrn_from_xattr);
2250 if (!retval) {
2251 // Cvmfs is not mounted anymore, but another cvmfs process is still in its
2252 // shutdown procedure. Try again and wait for the lock
2253 delete file_system;
2254 fs_info.wait_workspace = true;
2255 file_system = FileSystem::Create(fs_info);
2256 } else {
2257 if (fqrn_from_xattr == fqrn) {
2258 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2259 "repository already mounted on %s", mount_path.c_str());
2260 file_system->set_boot_status(loader::kFailDoubleMount);
2261 } else {
2262 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
2263 "CernVM-FS repository %s already mounted on %s", fqrn.c_str(),
2264 mount_path.c_str());
2265 file_system->set_boot_status(loader::kFailOtherMount);
2266 }
2267 }
2268 }
2269
2270 return file_system;
2271 }
2272
2273
2274 static void InitOptionsMgr(const loader::LoaderExports *loader_exports) {
2275 if (loader_exports->version >= 3 && loader_exports->simple_options_parsing) {
2276 cvmfs::options_mgr_ = new SimpleOptionsParser(
2277 new DefaultOptionsTemplateManager(loader_exports->repository_name));
2278 } else {
2279 cvmfs::options_mgr_ = new BashOptionsManager(
2280 new DefaultOptionsTemplateManager(loader_exports->repository_name));
2281 }
2282
2283 if (loader_exports->config_files != "") {
2284 vector<string> tokens = SplitString(loader_exports->config_files, ':');
2285 for (unsigned i = 0, s = tokens.size(); i < s; ++i) {
2286 cvmfs::options_mgr_->ParsePath(tokens[i], false);
2287 }
2288 } else {
2289 cvmfs::options_mgr_->ParseDefault(loader_exports->repository_name);
2290 }
2291 }
2292
2293
2294 static unsigned CheckMaxOpenFiles() {
2295 static unsigned max_open_files;
2296 static bool already_done = false;
2297
2298 // check number of open files (lazy evaluation)
2299 if (!already_done) {
2300 unsigned soft_limit = 0;
2301 unsigned hard_limit = 0;
2302 GetLimitNoFile(&soft_limit, &hard_limit);
2303
2304 if (soft_limit < cvmfs::kMinOpenFiles) {
2305 LogCvmfs(kLogCvmfs, kLogSyslogWarn | kLogDebug,
2306 "Warning: current limits for number of open files are "
2307 "(%u/%u)\n"
2308 "CernVM-FS is likely to run out of file descriptors, "
2309 "set ulimit -n to at least %u",
2310 soft_limit, hard_limit, cvmfs::kMinOpenFiles);
2311 }
2312 max_open_files = soft_limit;
2313 already_done = true;
2314 }
2315
2316 return max_open_files;
2317 }
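// [Editorial sketch, not part of cvmfs.cc] GetLimitNoFile() (util/posix.h) is
// assumed to report the process file-descriptor limits; the portable way to
// obtain them is getrlimit(2), assuming <sys/resource.h>:
//
//   struct rlimit rl;
//   getrlimit(RLIMIT_NOFILE, &rl);
//   const unsigned soft_limit = static_cast<unsigned>(rl.rlim_cur);
//   const unsigned hard_limit = static_cast<unsigned>(rl.rlim_max);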
2318
2319
2320 static int Init(const loader::LoaderExports *loader_exports) {
2321 g_boot_error = new string("unknown error");
2322 cvmfs::loader_exports_ = loader_exports;
2323
2324 crypto::SetupLibcryptoMt();
2325
2326 InitOptionsMgr(loader_exports);
2327
2328 // We need logging set up before forking the watchdog
2329 FileSystem::SetupLoggingStandalone(*cvmfs::options_mgr_,
2330 loader_exports->repository_name);
2331
2332 // Monitor, check for maximum number of open files
2333 if (cvmfs::UseWatchdog()) {
2334 auto_umount::SetMountpoint(loader_exports->mount_point);
2335 cvmfs::watchdog_ = Watchdog::Create(auto_umount::UmountOnCrash);
2336 if (cvmfs::watchdog_ == NULL) {
2337 *g_boot_error = "failed to initialize watchdog.";
2338 return loader::kFailMonitor;
2339 }
2340 }
2341 cvmfs::max_open_files_ = CheckMaxOpenFiles();
2342
2343 FileSystem::FileSystemInfo fs_info;
2344 fs_info.type = FileSystem::kFsFuse;
2345 fs_info.name = loader_exports->repository_name;
2346 fs_info.exe_path = loader_exports->program_name;
2347 fs_info.options_mgr = cvmfs::options_mgr_;
2348 fs_info.foreground = loader_exports->foreground;
2349 cvmfs::file_system_ = InitSystemFs(loader_exports->mount_point,
2350 loader_exports->repository_name, fs_info);
2351 if (!cvmfs::file_system_->IsValid()) {
2352 *g_boot_error = cvmfs::file_system_->boot_error();
2353 return cvmfs::file_system_->boot_status();
2354 }
2355 if ((cvmfs::file_system_->cache_mgr()->id() == kPosixCacheManager)
2356 && dynamic_cast<PosixCacheManager *>(cvmfs::file_system_->cache_mgr())
2357 ->do_refcount()) {
2358 cvmfs::check_fd_overflow_ = false;
2359 }
2360
2361 cvmfs::mount_point_ = MountPoint::Create(loader_exports->repository_name,
2362 cvmfs::file_system_);
2363 if (!cvmfs::mount_point_->IsValid()) {
2364 *g_boot_error = cvmfs::mount_point_->boot_error();
2365 return cvmfs::mount_point_->boot_status();
2366 }
2367
2368 RegisterMagicXattrs();
2369
2370 cvmfs::directory_handles_ = new cvmfs::DirectoryHandles();
2371 cvmfs::directory_handles_->set_empty_key((uint64_t)(-1));
2372 cvmfs::directory_handles_->set_deleted_key((uint64_t)(-2));
2373
2374 LogCvmfs(kLogCvmfs, kLogDebug, "fuse inode size is %lu bits",
2375 sizeof(fuse_ino_t) * 8);
2376
2377 cvmfs::inode_generation_info_
2378 .initial_revision = cvmfs::mount_point_->catalog_mgr()->GetRevision();
2379 cvmfs::inode_generation_info_.inode_generation = cvmfs::mount_point_
2380 ->inode_annotation()
2381 ->GetGeneration();
2382 LogCvmfs(kLogCvmfs, kLogDebug, "root inode is %" PRIu64,
2383 uint64_t(cvmfs::mount_point_->catalog_mgr()->GetRootInode()));
2384
2385 void **channel_or_session = NULL;
2386 if (loader_exports->version >= 4) {
2387 channel_or_session = loader_exports->fuse_channel_or_session;
2388 }
2389
2390 bool fuse_notify_invalidation = true;
2391 std::string buf;
2392 if (cvmfs::options_mgr_->GetValue("CVMFS_FUSE_NOTIFY_INVALIDATION", &buf)) {
2393 if (!cvmfs::options_mgr_->IsOn(buf)) {
2394 fuse_notify_invalidation = false;
2395 cvmfs::mount_point_->dentry_tracker()->Disable();
2396 }
2397 }
2398 cvmfs::fuse_remounter_ = new FuseRemounter(
2399 cvmfs::mount_point_, &cvmfs::inode_generation_info_, channel_or_session,
2400 fuse_notify_invalidation);
2401
2402 // Control & command interface
2403 cvmfs::talk_mgr_ = TalkManager::Create(
2404 cvmfs::mount_point_->talk_socket_path(),
2405 cvmfs::mount_point_,
2406 cvmfs::fuse_remounter_);
2407 if ((cvmfs::mount_point_->talk_socket_uid() != 0)
2408 || (cvmfs::mount_point_->talk_socket_gid() != 0)) {
2409 const uid_t tgt_uid = cvmfs::mount_point_->talk_socket_uid();
2410 const gid_t tgt_gid = cvmfs::mount_point_->talk_socket_gid();
2411 const int rvi = chown(cvmfs::mount_point_->talk_socket_path().c_str(),
2412 tgt_uid, tgt_gid);
2413 if (rvi != 0) {
2414 *g_boot_error = std::string("failed to set talk socket ownership - ")
2415 + "target " + StringifyInt(tgt_uid) + ":"
2416 + StringifyInt(tgt_gid) + ", user "
2417 + StringifyInt(geteuid()) + ":" + StringifyInt(getegid());
2418 return loader::kFailTalk;
2419 }
2420 }
2421 if (cvmfs::talk_mgr_ == NULL) {
2422 *g_boot_error = "failed to initialize talk socket (" + StringifyInt(errno)
2423 + ")";
2424 return loader::kFailTalk;
2425 }
2426
2427 // Notification system client
2428 {
2429 OptionsManager *options = cvmfs::file_system_->options_mgr();
2430 if (options->IsDefined("CVMFS_NOTIFICATION_SERVER")) {
2431 std::string config;
2432 options->GetValue("CVMFS_NOTIFICATION_SERVER", &config);
2433 const std::string repo_name = cvmfs::mount_point_->fqrn();
2434 cvmfs::notification_client_ = new NotificationClient(
2435 config, repo_name, cvmfs::fuse_remounter_,
2436 cvmfs::mount_point_->download_mgr(),
2437 cvmfs::mount_point_->signature_mgr());
2438 }
2439 }
2440
2441 return loader::kFailOk;
2442 }
2443 #endif // __TEST_CVMFS_MOCKFUSE
2444
2445
2446 /**
2447 * Things that have to be executed after fork() / daemon()
2448 */
2449 static void Spawn() {
2450 // First thing: kick off the watchdog while we still have a single-threaded
2451 // well-defined state
2452 cvmfs::pid_ = getpid();
2453 if (cvmfs::watchdog_) {
2454 cvmfs::watchdog_->Spawn(GetCurrentWorkingDirectory() + "/stacktrace."
2455 + cvmfs::mount_point_->fqrn());
2456 }
2457
2458 cvmfs::fuse_remounter_->Spawn();
2459 if (cvmfs::mount_point_->dentry_tracker()->is_active()) {
2460 cvmfs::mount_point_->dentry_tracker()->SpawnCleaner(
2461 // Usually every minute
2462 static_cast<unsigned int>(cvmfs::mount_point_->kcache_timeout_sec()));
2463 }
2464
2465 cvmfs::mount_point_->download_mgr()->Spawn();
2466 cvmfs::mount_point_->external_download_mgr()->Spawn();
2467 if (cvmfs::mount_point_->resolv_conf_watcher() != NULL) {
2468 cvmfs::mount_point_->resolv_conf_watcher()->Spawn();
2469 }
2470 QuotaManager *quota_mgr = cvmfs::file_system_->cache_mgr()->quota_mgr();
2471 quota_mgr->Spawn();
2472 if (quota_mgr->HasCapability(QuotaManager::kCapListeners)) {
2473 cvmfs::watchdog_listener_ = quota::RegisterWatchdogListener(
2474 quota_mgr, cvmfs::mount_point_->uuid()->uuid() + "-watchdog");
2475 cvmfs::unpin_listener_ = quota::RegisterUnpinListener(
2476 quota_mgr,
2477 cvmfs::mount_point_->catalog_mgr(),
2478 cvmfs::mount_point_->uuid()->uuid() + "-unpin");
2479 }
2480 cvmfs::mount_point_->tracer()->Spawn();
2481 cvmfs::talk_mgr_->Spawn();
2482
2483 if (cvmfs::notification_client_ != NULL) {
2484 cvmfs::notification_client_->Spawn();
2485 }
2486
2487 if (cvmfs::file_system_->nfs_maps() != NULL) {
2488 cvmfs::file_system_->nfs_maps()->Spawn();
2489 }
2490
2491 cvmfs::file_system_->cache_mgr()->Spawn();
2492
2493 if (cvmfs::mount_point_->telemetry_aggr() != NULL) {
2494 cvmfs::mount_point_->telemetry_aggr()->Spawn();
2495 }
2496 }
2497
2498
2499 static string GetErrorMsg() {
2500 if (g_boot_error)
2501 return *g_boot_error;
2502 return "";
2503 }
2504
2505
2506 /**
2507 * Called from Fini() and on its own at the end of SaveState; it performs a
2508 * partial Fini(), enough to delete the catalog manager so that no open file
2509 * handles from file catalogs remain active.
2510 */
2511 static void ShutdownMountpoint() {
2512 delete cvmfs::talk_mgr_;
2513 cvmfs::talk_mgr_ = NULL;
2514
2515 delete cvmfs::notification_client_;
2516 cvmfs::notification_client_ = NULL;
2517
2518 // The remounter has a reference to the mount point and the inode generation
2519 delete cvmfs::fuse_remounter_;
2520 cvmfs::fuse_remounter_ = NULL;
2521
2522 // The unpin listener requires the catalog, so this must be unregistered
2523 // before the catalog manager is removed
2524 if (cvmfs::unpin_listener_ != NULL) {
2525 quota::UnregisterListener(cvmfs::unpin_listener_);
2526 cvmfs::unpin_listener_ = NULL;
2527 }
2528 if (cvmfs::watchdog_listener_ != NULL) {
2529 quota::UnregisterListener(cvmfs::watchdog_listener_);
2530 cvmfs::watchdog_listener_ = NULL;
2531 }
2532
2533 delete cvmfs::directory_handles_;
2534 delete cvmfs::mount_point_;
2535 cvmfs::directory_handles_ = NULL;
2536 cvmfs::mount_point_ = NULL;
2537 }
2538
2539
2540 static void Fini() {
2541 ShutdownMountpoint();
2542
2543 delete cvmfs::file_system_;
2544 delete cvmfs::options_mgr_;
2545 cvmfs::file_system_ = NULL;
2546 cvmfs::options_mgr_ = NULL;
2547
2548 delete cvmfs::watchdog_;
2549 cvmfs::watchdog_ = NULL;
2550
2551 delete g_boot_error;
2552 g_boot_error = NULL;
2553 auto_umount::SetMountpoint("");
2554
2555 crypto::CleanupLibcryptoMt();
2556 }
2557
2558
2559 static int AltProcessFlavor(int argc, char **argv) {
2560 if (strcmp(argv[1], "__cachemgr__") == 0) {
2561 return PosixQuotaManager::MainCacheManager(argc, argv);
2562 }
2563 if (strcmp(argv[1], "__wpad__") == 0) {
2564 return download::MainResolveProxyDescription(argc, argv);
2565 }
2566 return 1;
2567 }
2568
2569
2570 static bool MaintenanceMode(const int fd_progress) {
2571 SendMsg2Socket(fd_progress, "Entering maintenance mode\n");
2572 string msg_progress = "Draining out kernel caches (";
2573 if (FuseInvalidator::HasFuseNotifyInval())
2574 msg_progress += "up to ";
2575 msg_progress += StringifyInt(static_cast<int>(
2576 cvmfs::mount_point_->kcache_timeout_sec()))
2577 + "s)\n";
2578 SendMsg2Socket(fd_progress, msg_progress);
2579 cvmfs::fuse_remounter_->EnterMaintenanceMode();
2580 return true;
2581 }
2582
2583 #ifndef __TEST_CVMFS_MOCKFUSE
2584 static bool SaveState(const int fd_progress, loader::StateList *saved_states) {
2585 string msg_progress;
2586
2587 const unsigned num_open_dirs = cvmfs::directory_handles_->size();
2588 if (num_open_dirs != 0) {
2589 #ifdef DEBUGMSG
2590 for (cvmfs::DirectoryHandles::iterator
2591 i = cvmfs::directory_handles_->begin(),
2592 iEnd = cvmfs::directory_handles_->end();
2593 i != iEnd;
2594 ++i) {
2595 LogCvmfs(kLogCvmfs, kLogDebug, "saving dirhandle %lu", i->first);
2596 }
2597 #endif
2598
2599 msg_progress = "Saving open directory handles ("
2600 + StringifyInt(num_open_dirs) + " handles)\n";
2601 SendMsg2Socket(fd_progress, msg_progress);
2602
2603 // TODO(jblomer): should rather be saved just in a malloc'd memory block
2604 cvmfs::DirectoryHandles *saved_handles = new cvmfs::DirectoryHandles(
2605 *cvmfs::directory_handles_);
2606 loader::SavedState *save_open_dirs = new loader::SavedState();
2607 save_open_dirs->state_id = loader::kStateOpenDirs;
2608 save_open_dirs->state = saved_handles;
2609 saved_states->push_back(save_open_dirs);
2610 }
2611
2612 if (!cvmfs::file_system_->IsNfsSource()) {
2613 msg_progress = "Saving inode tracker\n";
2614 SendMsg2Socket(fd_progress, msg_progress);
2615 glue::InodeTracker *saved_inode_tracker = new glue::InodeTracker(
2616 *cvmfs::mount_point_->inode_tracker());
2617 loader::SavedState *state_glue_buffer = new loader::SavedState();
2618 state_glue_buffer->state_id = loader::kStateGlueBufferV4;
2619 state_glue_buffer->state = saved_inode_tracker;
2620 saved_states->push_back(state_glue_buffer);
2621 }
2622
2623 msg_progress = "Saving negative entry cache\n";
2624 SendMsg2Socket(fd_progress, msg_progress);
2625 glue::DentryTracker *saved_dentry_tracker = new glue::DentryTracker(
2626 *cvmfs::mount_point_->dentry_tracker());
2627 loader::SavedState *state_dentry_tracker = new loader::SavedState();
2628 state_dentry_tracker->state_id = loader::kStateDentryTracker;
2629 state_dentry_tracker->state = saved_dentry_tracker;
2630 saved_states->push_back(state_dentry_tracker);
2631
2632 msg_progress = "Saving page cache entry tracker\n";
2633 SendMsg2Socket(fd_progress, msg_progress);
2634 glue::PageCacheTracker *saved_page_cache_tracker = new glue::PageCacheTracker(
2635 *cvmfs::mount_point_->page_cache_tracker());
2636 loader::SavedState *state_page_cache_tracker = new loader::SavedState();
2637 state_page_cache_tracker->state_id = loader::kStatePageCacheTracker;
2638 state_page_cache_tracker->state = saved_page_cache_tracker;
2639 saved_states->push_back(state_page_cache_tracker);
2640
2641 msg_progress = "Saving chunk tables\n";
2642 SendMsg2Socket(fd_progress, msg_progress);
2643 ChunkTables *saved_chunk_tables = new ChunkTables(
2644 *cvmfs::mount_point_->chunk_tables());
2645 loader::SavedState *state_chunk_tables = new loader::SavedState();
2646 state_chunk_tables->state_id = loader::kStateOpenChunksV4;
2647 state_chunk_tables->state = saved_chunk_tables;
2648 saved_states->push_back(state_chunk_tables);
2649
2650 msg_progress = "Saving inode generation\n";
2651 SendMsg2Socket(fd_progress, msg_progress);
2652 cvmfs::inode_generation_info_
2653 .inode_generation += cvmfs::mount_point_->catalog_mgr()->inode_gauge();
2654 cvmfs::InodeGenerationInfo
2655 *saved_inode_generation = new cvmfs::InodeGenerationInfo(
2656 cvmfs::inode_generation_info_);
2657 loader::SavedState *state_inode_generation = new loader::SavedState();
2658 state_inode_generation->state_id = loader::kStateInodeGeneration;
2659 state_inode_generation->state = saved_inode_generation;
2660 saved_states->push_back(state_inode_generation);
2661
2662 msg_progress = "Saving fuse state\n";
2663 SendMsg2Socket(fd_progress, msg_progress);
2664 cvmfs::FuseState *saved_fuse_state = new cvmfs::FuseState();
2665 saved_fuse_state->cache_symlinks = cvmfs::mount_point_->cache_symlinks();
2666 saved_fuse_state->has_dentry_expire = cvmfs::mount_point_
2667 ->fuse_expire_entry();
2668 loader::SavedState *state_fuse = new loader::SavedState();
2669 state_fuse->state_id = loader::kStateFuse;
2670 state_fuse->state = saved_fuse_state;
2671 saved_states->push_back(state_fuse);
2672
2673 // Close open file catalogs
2674 ShutdownMountpoint();
2675
2676 loader::SavedState *state_cache_mgr = new loader::SavedState();
2677 state_cache_mgr->state_id = loader::kStateOpenFiles;
2678 state_cache_mgr->state = cvmfs::file_system_->cache_mgr()->SaveState(
2679 fd_progress);
2680 saved_states->push_back(state_cache_mgr);
2681
2682 msg_progress = "Saving open files counter\n";
2683 uint32_t *saved_num_fd = new uint32_t(
2684 cvmfs::file_system_->no_open_files()->Get());
2685 loader::SavedState *state_num_fd = new loader::SavedState();
2686 state_num_fd->state_id = loader::kStateOpenFilesCounter;
2687 state_num_fd->state = saved_num_fd;
2688 saved_states->push_back(state_num_fd);
2689
2690 return true;
2691 }
2692
2693
2694 static bool RestoreState(const int fd_progress,
2695 const loader::StateList &saved_states) {
2696 // If we have no saved version of the page cache tracker, it is unsafe
2697 // to start using it. The page cache tracker has to run for the entire
2698 // lifetime of the mountpoint or not at all.
2699 cvmfs::mount_point_->page_cache_tracker()->Disable();
2700
2701 for (unsigned i = 0, l = saved_states.size(); i < l; ++i) {
2702 if (saved_states[i]->state_id == loader::kStateOpenDirs) {
2703 SendMsg2Socket(fd_progress, "Restoring open directory handles... ");
2704 delete cvmfs::directory_handles_;
2705 cvmfs::DirectoryHandles
2706 *saved_handles = (cvmfs::DirectoryHandles *)saved_states[i]->state;
2707 cvmfs::directory_handles_ = new cvmfs::DirectoryHandles(*saved_handles);
2708 cvmfs::file_system_->no_open_dirs()->Set(
2709 cvmfs::directory_handles_->size());
2710 cvmfs::DirectoryHandles::const_iterator i = cvmfs::directory_handles_
2711 ->begin();
2712 for (; i != cvmfs::directory_handles_->end(); ++i) {
2713 if (i->first >= cvmfs::next_directory_handle_)
2714 cvmfs::next_directory_handle_ = i->first + 1;
2715 }
2716
2717 SendMsg2Socket(
2718 fd_progress,
2719 StringifyInt(cvmfs::directory_handles_->size()) + " handles\n");
2720 }
2721
2722 if (saved_states[i]->state_id == loader::kStateGlueBuffer) {
2723 SendMsg2Socket(fd_progress, "Migrating inode tracker (v1 to v4)... ");
2724 compat::inode_tracker::InodeTracker
2725 *saved_inode_tracker = (compat::inode_tracker::InodeTracker *)
2726 saved_states[i]
2727 ->state;
2728 compat::inode_tracker::Migrate(saved_inode_tracker,
2729 cvmfs::mount_point_->inode_tracker());
2730 SendMsg2Socket(fd_progress, " done\n");
2731 }
2732
2733 if (saved_states[i]->state_id == loader::kStateGlueBufferV2) {
2734 SendMsg2Socket(fd_progress, "Migrating inode tracker (v2 to v4)... ");
2735 compat::inode_tracker_v2::InodeTracker
2736 *saved_inode_tracker = (compat::inode_tracker_v2::InodeTracker *)
2737 saved_states[i]
2738 ->state;
2739 compat::inode_tracker_v2::Migrate(saved_inode_tracker,
2740 cvmfs::mount_point_->inode_tracker());
2741 SendMsg2Socket(fd_progress, " done\n");
2742 }
2743
2744 if (saved_states[i]->state_id == loader::kStateGlueBufferV3) {
2745 SendMsg2Socket(fd_progress, "Migrating inode tracker (v3 to v4)... ");
2746 compat::inode_tracker_v3::InodeTracker
2747 *saved_inode_tracker = (compat::inode_tracker_v3::InodeTracker *)
2748 saved_states[i]
2749 ->state;
2750 compat::inode_tracker_v3::Migrate(saved_inode_tracker,
2751 cvmfs::mount_point_->inode_tracker());
2752 SendMsg2Socket(fd_progress, " done\n");
2753 }
2754
2755 if (saved_states[i]->state_id == loader::kStateGlueBufferV4) {
2756 SendMsg2Socket(fd_progress, "Restoring inode tracker... ");
2757 cvmfs::mount_point_->inode_tracker()->~InodeTracker();
2758 glue::InodeTracker
2759 *saved_inode_tracker = (glue::InodeTracker *)saved_states[i]->state;
2760 new (cvmfs::mount_point_->inode_tracker())
2761 glue::InodeTracker(*saved_inode_tracker);
2762 SendMsg2Socket(fd_progress, " done\n");
2763 }
2764
2765 if (saved_states[i]->state_id == loader::kStateDentryTracker) {
2766 SendMsg2Socket(fd_progress, "Restoring dentry tracker... ");
2767 cvmfs::mount_point_->dentry_tracker()->~DentryTracker();
2768 glue::DentryTracker
2769 *saved_dentry_tracker = static_cast<glue::DentryTracker *>(
2770 saved_states[i]->state);
2771 new (cvmfs::mount_point_->dentry_tracker())
2772 glue::DentryTracker(*saved_dentry_tracker);
2773 SendMsg2Socket(fd_progress, " done\n");
2774 }
2775
2776 if (saved_states[i]->state_id == loader::kStatePageCacheTracker) {
2777 SendMsg2Socket(fd_progress, "Restoring page cache entry tracker... ");
2778 cvmfs::mount_point_->page_cache_tracker()->~PageCacheTracker();
2779 glue::PageCacheTracker
2780 *saved_page_cache_tracker = (glue::PageCacheTracker *)saved_states[i]
2781 ->state;
2782 new (cvmfs::mount_point_->page_cache_tracker())
2783 glue::PageCacheTracker(*saved_page_cache_tracker);
2784 SendMsg2Socket(fd_progress, " done\n");
2785 }
2786
2787 ChunkTables *chunk_tables = cvmfs::mount_point_->chunk_tables();
2788
2789 if (saved_states[i]->state_id == loader::kStateOpenChunks) {
2790 SendMsg2Socket(fd_progress, "Migrating chunk tables (v1 to v4)... ");
2791 compat::chunk_tables::ChunkTables
2792 *saved_chunk_tables = (compat::chunk_tables::ChunkTables *)
2793 saved_states[i]
2794 ->state;
2795 compat::chunk_tables::Migrate(saved_chunk_tables, chunk_tables);
2796 SendMsg2Socket(
2797 fd_progress,
2798 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2799 }
2800
2801 if (saved_states[i]->state_id == loader::kStateOpenChunksV2) {
2802 SendMsg2Socket(fd_progress, "Migrating chunk tables (v2 to v4)... ");
2803 compat::chunk_tables_v2::ChunkTables
2804 *saved_chunk_tables = (compat::chunk_tables_v2::ChunkTables *)
2805 saved_states[i]
2806 ->state;
2807 compat::chunk_tables_v2::Migrate(saved_chunk_tables, chunk_tables);
2808 SendMsg2Socket(
2809 fd_progress,
2810 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2811 }
2812
2813 if (saved_states[i]->state_id == loader::kStateOpenChunksV3) {
2814 SendMsg2Socket(fd_progress, "Migrating chunk tables (v3 to v4)... ");
2815 compat::chunk_tables_v3::ChunkTables
2816 *saved_chunk_tables = (compat::chunk_tables_v3::ChunkTables *)
2817 saved_states[i]
2818 ->state;
2819 compat::chunk_tables_v3::Migrate(saved_chunk_tables, chunk_tables);
2820 SendMsg2Socket(
2821 fd_progress,
2822 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2823 }
2824
2825 if (saved_states[i]->state_id == loader::kStateOpenChunksV4) {
2826 SendMsg2Socket(fd_progress, "Restoring chunk tables... ");
2827 chunk_tables->~ChunkTables();
2828 ChunkTables *saved_chunk_tables = reinterpret_cast<ChunkTables *>(
2829 saved_states[i]->state);
2830 new (chunk_tables) ChunkTables(*saved_chunk_tables);
2831 SendMsg2Socket(fd_progress, " done\n");
2832 }
2833
2834 if (saved_states[i]->state_id == loader::kStateInodeGeneration) {
2835 SendMsg2Socket(fd_progress, "Restoring inode generation... ");
2836 cvmfs::InodeGenerationInfo
2837 *old_info = (cvmfs::InodeGenerationInfo *)saved_states[i]->state;
2838 if (old_info->version == 1) {
2839 // Migration
2840 cvmfs::inode_generation_info_.initial_revision = old_info
2841 ->initial_revision;
2842 cvmfs::inode_generation_info_.incarnation = old_info->incarnation;
2843 // Note: in the rare case of the inode generation having been 0 before,
2844 // inodes can clash after reload, before the next remount
2845 } else {
2846 cvmfs::inode_generation_info_ = *old_info;
2847 }
2848 ++cvmfs::inode_generation_info_.incarnation;
2849 SendMsg2Socket(fd_progress, " done\n");
2850 }
2851
2852 if (saved_states[i]->state_id == loader::kStateOpenFilesCounter) {
2853 SendMsg2Socket(fd_progress, "Restoring open files counter... ");
2854 cvmfs::file_system_->no_open_files()->Set(
2855 *(reinterpret_cast<uint32_t *>(saved_states[i]->state)));
2856 SendMsg2Socket(fd_progress, " done\n");
2857 }
2858
2859 if (saved_states[i]->state_id == loader::kStateOpenFiles) {
2860 const int old_root_fd = cvmfs::mount_point_->catalog_mgr()->root_fd();
2861
2862 // TODO(jblomer): make this less hacky
2863
2864 const CacheManagerIds saved_type = cvmfs::file_system_->cache_mgr()
2865 ->PeekState(
2866 saved_states[i]->state);
2867 int fixup_root_fd = -1;
2868
2869 if ((saved_type == kStreamingCacheManager)
2870 && (cvmfs::file_system_->cache_mgr()->id()
2871 != kStreamingCacheManager)) {
2872 // stick to the streaming cache manager
2873 StreamingCacheManager *new_cache_mgr = new StreamingCacheManager(
2874 cvmfs::max_open_files_,
2875 cvmfs::file_system_->cache_mgr(),
2876 cvmfs::mount_point_->download_mgr(),
2877 cvmfs::mount_point_->external_download_mgr(),
2878 StreamingCacheManager::kDefaultBufferSize,
2879 cvmfs::file_system_->statistics());
2880 fixup_root_fd = new_cache_mgr->PlantFd(old_root_fd);
2881 cvmfs::file_system_->ReplaceCacheManager(new_cache_mgr);
2882 cvmfs::mount_point_->fetcher()->ReplaceCacheManager(new_cache_mgr);
2883 cvmfs::mount_point_->external_fetcher()->ReplaceCacheManager(
2884 new_cache_mgr);
2885 }
2886
2887 if ((cvmfs::file_system_->cache_mgr()->id() == kStreamingCacheManager)
2888 && (saved_type != kStreamingCacheManager)) {
2889 // stick to the cache manager wrapped into the streaming cache
2890 CacheManager *wrapped_cache_mgr = dynamic_cast<StreamingCacheManager *>(
2891 cvmfs::file_system_->cache_mgr())
2892 ->MoveOutBackingCacheMgr(
2893 &fixup_root_fd);
2894 delete cvmfs::file_system_->cache_mgr();
2895 cvmfs::file_system_->ReplaceCacheManager(wrapped_cache_mgr);
2896 cvmfs::mount_point_->fetcher()->ReplaceCacheManager(wrapped_cache_mgr);
2897 cvmfs::mount_point_->external_fetcher()->ReplaceCacheManager(
2898 wrapped_cache_mgr);
2899 }
2900
2901 const int new_root_fd = cvmfs::file_system_->cache_mgr()->RestoreState(
2902 fd_progress, saved_states[i]->state);
2903 LogCvmfs(kLogCvmfs, kLogDebug, "new root file catalog descriptor @%d",
2904 new_root_fd);
2905 if (new_root_fd >= 0) {
2906 cvmfs::file_system_->RemapCatalogFd(old_root_fd, new_root_fd);
2907 } else if (fixup_root_fd >= 0) {
2908 LogCvmfs(kLogCvmfs, kLogDebug,
2909 "new root file catalog descriptor (fixup) @%d", fixup_root_fd);
2910 cvmfs::file_system_->RemapCatalogFd(old_root_fd, fixup_root_fd);
2911 }
2912 }
2913
2914 if (saved_states[i]->state_id == loader::kStateFuse) {
2915 SendMsg2Socket(fd_progress, "Restoring fuse state... ");
2916 cvmfs::FuseState *fuse_state = static_cast<cvmfs::FuseState *>(
2917 saved_states[i]->state);
2918 if (!fuse_state->cache_symlinks)
2919 cvmfs::mount_point_->DisableCacheSymlinks();
2920 if (fuse_state->has_dentry_expire)
2921 cvmfs::mount_point_->EnableFuseExpireEntry();
2922 SendMsg2Socket(fd_progress, " done\n");
2923 }
2924 }
2925 if (cvmfs::mount_point_->inode_annotation()) {
2926 const uint64_t saved_generation = cvmfs::inode_generation_info_
2927 .inode_generation;
2928 cvmfs::mount_point_->inode_annotation()->IncGeneration(saved_generation);
2929 }
2930
2931 return true;
2932 }
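// [Editorial sketch, not part of cvmfs.cc] The restore branches above share one
// idiom: the live tracker is destroyed in place and the saved snapshot is
// copy-constructed over it with placement new, so that pointers to the object
// held elsewhere (e.g. inside MountPoint) remain valid. Generic form:
//
//   template <typename T>
//   void RestoreInPlaceSketch(T *live, const T &saved) {
//     live->~T();           // run the destructor of the current instance
//     new (live) T(saved);  // copy-construct the saved snapshot in place
//   }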
2933
2934
2935 static void FreeSavedState(const int fd_progress,
2936 const loader::StateList &saved_states) {
2937 for (unsigned i = 0, l = saved_states.size(); i < l; ++i) {
2938 switch (saved_states[i]->state_id) {
2939 case loader::kStateOpenDirs:
2940 SendMsg2Socket(fd_progress, "Releasing saved open directory handles\n");
2941 delete static_cast<cvmfs::DirectoryHandles *>(saved_states[i]->state);
2942 break;
2943 case loader::kStateGlueBuffer:
2944 SendMsg2Socket(fd_progress,
2945 "Releasing saved glue buffer (version 1)\n");
2946 delete static_cast<compat::inode_tracker::InodeTracker *>(
2947 saved_states[i]->state);
2948 break;
2949 case loader::kStateGlueBufferV2:
2950 SendMsg2Socket(fd_progress,
2951 "Releasing saved glue buffer (version 2)\n");
2952 delete static_cast<compat::inode_tracker_v2::InodeTracker *>(
2953 saved_states[i]->state);
2954 break;
2955 case loader::kStateGlueBufferV3:
2956 SendMsg2Socket(fd_progress,
2957 "Releasing saved glue buffer (version 3)\n");
2958 delete static_cast<compat::inode_tracker_v3::InodeTracker *>(
2959 saved_states[i]->state);
2960 break;
2961 case loader::kStateGlueBufferV4:
2962 SendMsg2Socket(fd_progress, "Releasing saved glue buffer\n");
2963 delete static_cast<glue::InodeTracker *>(saved_states[i]->state);
2964 break;
2965 case loader::kStateDentryTracker:
2966 SendMsg2Socket(fd_progress, "Releasing saved dentry tracker\n");
2967 delete static_cast<glue::DentryTracker *>(saved_states[i]->state);
2968 break;
2969 case loader::kStatePageCacheTracker:
2970 SendMsg2Socket(fd_progress, "Releasing saved page cache entry cache\n");
2971 delete static_cast<glue::PageCacheTracker *>(saved_states[i]->state);
2972 break;
2973 case loader::kStateOpenChunks:
2974 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 1)\n");
2975 delete static_cast<compat::chunk_tables::ChunkTables *>(
2976 saved_states[i]->state);
2977 break;
2978 case loader::kStateOpenChunksV2:
2979 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 2)\n");
2980 delete static_cast<compat::chunk_tables_v2::ChunkTables *>(
2981 saved_states[i]->state);
2982 break;
2983 case loader::kStateOpenChunksV3:
2984 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 3)\n");
2985 delete static_cast<compat::chunk_tables_v3::ChunkTables *>(
2986 saved_states[i]->state);
2987 break;
2988 case loader::kStateOpenChunksV4:
2989 SendMsg2Socket(fd_progress, "Releasing chunk tables\n");
2990 delete static_cast<ChunkTables *>(saved_states[i]->state);
2991 break;
2992 case loader::kStateInodeGeneration:
2993 SendMsg2Socket(fd_progress, "Releasing saved inode generation info\n");
2994 delete static_cast<cvmfs::InodeGenerationInfo *>(
2995 saved_states[i]->state);
2996 break;
2997 case loader::kStateOpenFiles:
2998 cvmfs::file_system_->cache_mgr()->FreeState(fd_progress,
2999 saved_states[i]->state);
3000 break;
3001 case loader::kStateOpenFilesCounter:
3002 SendMsg2Socket(fd_progress, "Releasing open files counter\n");
3003 delete static_cast<uint32_t *>(saved_states[i]->state);
3004 break;
3005 case loader::kStateFuse:
3006 SendMsg2Socket(fd_progress, "Releasing fuse state\n");
3007 delete static_cast<cvmfs::FuseState *>(saved_states[i]->state);
3008 break;
3009 default:
3010 break;
3011 }
3012 }
3013 }
3014 #endif
3015
3016
3017 static void __attribute__((constructor)) LibraryMain() {
3018 g_cvmfs_exports = new loader::CvmfsExports();
3019 g_cvmfs_exports->so_version = CVMFS_VERSION;
3020 g_cvmfs_exports->fnAltProcessFlavor = AltProcessFlavor;
3021 g_cvmfs_exports->fnInit = Init;
3022 g_cvmfs_exports->fnSpawn = Spawn;
3023 g_cvmfs_exports->fnFini = Fini;
3024 g_cvmfs_exports->fnGetErrorMsg = GetErrorMsg;
3025 g_cvmfs_exports->fnMaintenanceMode = MaintenanceMode;
3026 #ifndef __TEST_CVMFS_MOCKFUSE
3027 g_cvmfs_exports->fnSaveState = SaveState;
3028 g_cvmfs_exports->fnRestoreState = RestoreState;
3029 g_cvmfs_exports->fnFreeSavedState = FreeSavedState;
3030 #endif
3031 cvmfs::SetCvmfsOperations(&g_cvmfs_exports->cvmfs_operations);
3032 }
3033
3034
3035 static void __attribute__((destructor)) LibraryExit() {
3036 delete g_cvmfs_exports;
3037 g_cvmfs_exports = NULL;
3038 }
3039