GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/cvmfs.cc
Date: 2025-06-29 02:35:41
              Exec    Total    Coverage
Lines:           0     1605        0.0%
Branches:        0     2293        0.0%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 *
4 * CernVM-FS is a FUSE module which implements an HTTP read-only filesystem.
5 * The original idea is based on GROW-FS.
6 *
7 * CernVM-FS shows a remote HTTP directory as local file system. The client
8 * sees all available files. On first access, a file is downloaded and
9 * cached locally. All downloaded pieces are verified by a cryptographic
10 * content hash.
11 *
12 * To do so, a directory hive has to be transformed into a CVMFS2
13 * "repository". This can be done by the CernVM-FS server tools.
14 *
15 * This preparation of directories is transparent to web servers and
16 * web proxies. They just serve static content, i.e. arbitrary files.
17 * Any HTTP server should do the job. We use Apache + Squid. Serving
18 * files from the memory of a web proxy brings a significant performance
19 * improvement.
20 */
21
22 // TODO(jblomer): the file system root should probably always return 1 for an
23 // inode. See also integration test #23.
24
25 #define ENOATTR ENODATA /**< instead of including attr/xattr.h */
26
27 #ifndef __STDC_FORMAT_MACROS
28 #define __STDC_FORMAT_MACROS
29 #endif
30
31 // sys/xattr.h conflicts with linux/xattr.h and needs to be loaded very early
32 // clang-format off
33 #include <sys/xattr.h> // NOLINT
34 // clang-format on
35
36
37 #include "cvmfs.h"
38
39 #include <alloca.h>
40 #include <errno.h>
41 #include <fcntl.h>
42 #include <inttypes.h>
43 #include <pthread.h>
44 #include <stddef.h>
45 #include <stdint.h>
46 #include <sys/errno.h>
47 #include <sys/statvfs.h>
48 #include <sys/types.h>
49 #include <unistd.h>
50
51 #include <algorithm>
52 #include <cassert>
53 #include <cstdio>
54 #include <cstdlib>
55 #include <cstring>
56 #include <ctime>
57 #include <functional>
58 #include <google/dense_hash_map>
59 #include <string>
60 #include <utility>
61 #include <vector>
62
63 #include "authz/authz_session.h"
64 #include "auto_umount.h"
65 #include "backoff.h"
66 #include "bigvector.h"
67 #include "cache.h"
68 #include "cache_posix.h"
69 #include "cache_stream.h"
70 #include "catalog_mgr.h"
71 #include "catalog_mgr_client.h"
72 #include "clientctx.h"
73 #include "compat.h"
74 #include "compression/compression.h"
75 #include "crypto/crypto_util.h"
76 #include "crypto/hash.h"
77 #include "directory_entry.h"
78 #include "fence.h"
79 #include "fetch.h"
80 #include "file_chunk.h"
81 #include "fuse_evict.h"
82 #include "fuse_inode_gen.h"
83 #include "fuse_remount.h"
84 #include "glue_buffer.h"
85 #include "interrupt.h"
86 #include "loader.h"
87 #include "lru_md.h"
88 #include "magic_xattr.h"
89 #include "manifest_fetch.h"
90 #include "monitor.h"
91 #include "mountpoint.h"
92 #include "network/download.h"
93 #include "nfs_maps.h"
94 #include "notification_client.h"
95 #include "options.h"
96 #include "quota_listener.h"
97 #include "quota_posix.h"
98 #include "sanitizer.h"
99 #include "shortstring.h"
100 #include "sqlitevfs.h"
101 #include "statistics.h"
102 #include "talk.h"
103 #include "telemetry_aggregator.h"
104 #include "tracer.h"
105 #include "util/algorithm.h"
106 #include "util/exception.h"
107 #include "util/logging.h"
108 #include "util/mutex.h"
109 #include "util/pointer.h"
110 #include "util/smalloc.h"
111 #include "util/string.h"
112 #include "util/posix.h"
113 #include "util/testing.h"
114 #include "util/uuid.h"
115 #include "wpad.h"
116 #include "xattr.h"
117
118 using namespace std; // NOLINT
119
120 namespace cvmfs {
121
122 FileSystem *file_system_ = NULL;
123 MountPoint *mount_point_ = NULL;
124 TalkManager *talk_mgr_ = NULL;
125 NotificationClient *notification_client_ = NULL;
126 Watchdog *watchdog_ = NULL;
127 FuseRemounter *fuse_remounter_ = NULL;
128 InodeGenerationInfo inode_generation_info_;
129
130
131 /**
132 * For cvmfs_opendir / cvmfs_readdir
133 * TODO: use mmap for very large listings
134 */
135 struct DirectoryListing {
136 char *buffer; /**< Filled by fuse_add_direntry */
137
138 // No longer really used, but the directory listing needs to be migrated
139 // during hotpatch. If the buffer is allocated by smmap, capacity is zero.
140 size_t size;
141 size_t capacity;
142
143 DirectoryListing() : buffer(NULL), size(0), capacity(0) { }
144 };
145
146 const loader::LoaderExports *loader_exports_ = NULL;
147 OptionsManager *options_mgr_ = NULL;
148 pid_t pid_ = 0; /**< will be set after daemon() */
149 quota::ListenerHandle *watchdog_listener_ = NULL;
150 quota::ListenerHandle *unpin_listener_ = NULL;
151
152
153 typedef google::dense_hash_map<uint64_t, DirectoryListing,
154 hash_murmur<uint64_t> >
155 DirectoryHandles;
156 DirectoryHandles *directory_handles_ = NULL;
157 pthread_mutex_t lock_directory_handles_ = PTHREAD_MUTEX_INITIALIZER;
158 uint64_t next_directory_handle_ = 0;
159
160 unsigned max_open_files_; /**< maximum allowed number of open files */
161 /**
162 * The refcounted cache manager should suppress checking the current number
163 * of files opened through cvmfs_open() against the process' file descriptor
164 * limit.
165 */
166 bool check_fd_overflow_ = true;
167 /**
168 * Number of reserved file descriptors for internal use
169 */
170 const int kNumReservedFd = 512;
171 /**
172 * Warn if the process has a lower limit for the number of open file descriptors
173 */
174 const unsigned int kMinOpenFiles = 8192;
175
176
177 class FuseInterruptCue : public InterruptCue {
178 public:
179 explicit FuseInterruptCue(fuse_req_t *r) : req_ptr_(r) { }
180 virtual ~FuseInterruptCue() { }
181 virtual bool IsCanceled() { return fuse_req_interrupted(*req_ptr_); }
182
183 private:
184 fuse_req_t *req_ptr_;
185 };
186
187 /**
188 * Options related to the fuse kernel connection. The capabilities are
189 * determined only once at mount time. If a capability triggers certain
190 * behavior of the cvmfs fuse module, it needs to be re-triggered on reload.
191 * Used in SaveState and RestoreState to store the details of symlink caching.
192 */
193 struct FuseState {
194 FuseState() : version(0), cache_symlinks(false), has_dentry_expire(false) { }
195 unsigned version;
196 bool cache_symlinks;
197 bool has_dentry_expire;
198 };
199
200
201 /**
202 * Atomic increase of the open files counter. If we use a non-refcounted
203 * POSIX cache manager, check for open fd overflow. Return false if too many
204 * files are opened. Otherwise return true (success).
205 */
206 static inline bool IncAndCheckNoOpenFiles() {
207 const int64_t no_open_files = perf::Xadd(file_system_->no_open_files(), 1);
208 if (!check_fd_overflow_)
209 return true;
210 return no_open_files < (static_cast<int>(max_open_files_) - kNumReservedFd);
211 }
212
213 static inline double GetKcacheTimeout() {
214 if (!fuse_remounter_->IsCaching())
215 return 0.0;
216 return mount_point_->kcache_timeout_sec();
217 }
218
219
220 void GetReloadStatus(bool *drainout_mode, bool *maintenance_mode) {
221 *drainout_mode = fuse_remounter_->IsInDrainoutMode();
222 *maintenance_mode = fuse_remounter_->IsInMaintenanceMode();
223 }
224
225
226 static bool UseWatchdog() {
227 if (loader_exports_ == NULL || loader_exports_->version < 2) {
228 return true; // spawn watchdog by default
229 // Note: with library versions before 2.1.8 it might not
230 // create stack traces properly in all cases
231 }
232
233 return !loader_exports_->disable_watchdog;
234 }
235
236 std::string PrintInodeGeneration() {
237 return "init-catalog-revision: "
238 + StringifyInt(inode_generation_info_.initial_revision) + " "
239 + "current-catalog-revision: "
240 + StringifyInt(mount_point_->catalog_mgr()->GetRevision()) + " "
241 + "incarnation: " + StringifyInt(inode_generation_info_.incarnation)
242 + " " + "inode generation: "
243 + StringifyInt(inode_generation_info_.inode_generation) + "\n";
244 }
245
246
247 static bool CheckVoms(const fuse_ctx &fctx) {
248 if (!mount_point_->has_membership_req())
249 return true;
250 const string mreq = mount_point_->membership_req();
251 LogCvmfs(kLogCvmfs, kLogDebug,
252 "Got VOMS authz %s from filesystem "
253 "properties",
254 mreq.c_str());
255
256 if (fctx.uid == 0)
257 return true;
258
259 return mount_point_->authz_session_mgr()->IsMemberOf(fctx.pid, mreq);
260 }
261
262 static bool MayBeInPageCacheTracker(const catalog::DirectoryEntry &dirent) {
263 return dirent.IsRegular()
264 && (dirent.inode() < mount_point_->catalog_mgr()->GetRootInode());
265 }
266
267 static bool HasDifferentContent(const catalog::DirectoryEntry &dirent,
268 const shash::Any &hash,
269 const struct stat &info) {
270 if (hash == dirent.checksum())
271 return false;
272 // For chunked files, we don't want to load the full list of chunk hashes
273 // so we only check the last modified timestamp
274 if (dirent.IsChunkedFile() && (info.st_mtime == dirent.mtime()))
275 return false;
276 return true;
277 }
278
279 /**
280 * When we look up an inode (cvmfs_lookup(), cvmfs_opendir()), we usually provide
281 * the live inode, i.e. the one in the inode tracker. However, if the inode
282 * refers to an open file that has different content than the one from the
283 * current catalogs, we will replace the live inode in the tracker with the one
284 * from the current generation.
285 *
286 * To still access the old inode, e.g. for fstat() on the open file, the stat
287 * structure connected to this inode is taken from the page cache tracker.
288 */
289 static bool FixupOpenInode(const PathString &path,
290 catalog::DirectoryEntry *dirent) {
291 if (!MayBeInPageCacheTracker(*dirent))
292 return false;
293
294 CVMFS_TEST_INJECT_BARRIER("_CVMFS_TEST_BARRIER_INODE_REPLACE");
295
296 const bool is_stale = mount_point_->page_cache_tracker()->IsStale(*dirent);
297
298 if (is_stale) {
299 // Overwrite dirent with inode from current generation
300 const bool found = mount_point_->catalog_mgr()->LookupPath(
301 path, catalog::kLookupDefault, dirent);
302 assert(found);
303 }
304
305 return is_stale;
306 }
307
308 static bool GetDirentForInode(const fuse_ino_t ino,
309 catalog::DirectoryEntry *dirent) {
310 // Lookup inode in cache
311 if (mount_point_->inode_cache()->Lookup(ino, dirent))
312 return true;
313
314 // Look in the catalogs in 2 steps: lookup inode->path, lookup path
315 static const catalog::DirectoryEntry
316 dirent_negative = catalog::DirectoryEntry(catalog::kDirentNegative);
317 // Reset directory entry. If the function returns false and dirent is not
318 // kDirentNegative, it was an I/O error
319 *dirent = catalog::DirectoryEntry();
320
321 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
322
323 if (file_system_->IsNfsSource()) {
324 // NFS mode
325 PathString path;
326 const bool retval = file_system_->nfs_maps()->GetPath(ino, &path);
327 if (!retval) {
328 *dirent = dirent_negative;
329 return false;
330 }
331 if (catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent)) {
332 // Fix inodes
333 dirent->set_inode(ino);
334 mount_point_->inode_cache()->Insert(ino, *dirent);
335 return true;
336 }
337 return false; // Not found in catalog or catalog load error
338 }
339
340 // Non-NFS mode
341 PathString path;
342 if (ino == catalog_mgr->GetRootInode()) {
343 const bool retval = catalog_mgr->LookupPath(
344 PathString(), catalog::kLookupDefault, dirent);
345
346 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
347 "GetDirentForInode: Race condition? Not found dirent %s",
348 dirent->name().c_str())) {
349 return false;
350 }
351
352 dirent->set_inode(ino);
353 mount_point_->inode_cache()->Insert(ino, *dirent);
354 return true;
355 }
356
357 glue::InodeEx inode_ex(ino, glue::InodeEx::kUnknownType);
358 const bool retval = mount_point_->inode_tracker()->FindPath(&inode_ex, &path);
359 if (!retval) {
360 // This may be a retired inode whose stat information is only available
361 // in the page cache tracker because there is still an open file
362 LogCvmfs(kLogCvmfs, kLogDebug,
363 "GetDirentForInode inode lookup failure %" PRId64, ino);
364 *dirent = dirent_negative;
365 // Indicate that the inode was not found in the tracker rather than not
366 // found in the catalog
367 dirent->set_inode(ino);
368 return false;
369 }
370 if (catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent)) {
371 if (!inode_ex.IsCompatibleFileType(dirent->mode())) {
372 LogCvmfs(kLogCvmfs, kLogDebug,
373 "Warning: inode %" PRId64 " (%s) changed file type", ino,
374 path.c_str());
375 // TODO(jblomer): we detect this issue but let it continue unhandled.
376 // Fix me.
377 }
378
379 // Fix inodes
380 dirent->set_inode(ino);
381 mount_point_->inode_cache()->Insert(ino, *dirent);
382 return true;
383 }
384
385 // Can happen after reload of catalogs or on catalog load failure
386 LogCvmfs(kLogCvmfs, kLogDebug, "GetDirentForInode path lookup failure");
387 return false;
388 }
389
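// In short, GetDirentForInode() resolves an inode as: inode cache ->
// (NFS maps | inode tracker) -> catalog path lookup, and re-inserts the
// resolved entry into the inode cache. A minimal caller sketch:
//
//   catalog::DirectoryEntry dirent;
//   if (GetDirentForInode(ino, &dirent)) {
//     // dirent.inode() == ino; entry is now in mount_point_->inode_cache()
//   } else {
//     // dirent is kDirentNegative on a lookup miss, otherwise an I/O error
//   }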
390
391 /**
392 * Returns 0 if the path does not exist
393 * 1 if the live inode is returned
394 * >1 the live inode, which is then stale and the inode in dirent
395 * comes from the catalog in the current generation
396 * (see FixupOpenInode)
397 */
398 static uint64_t GetDirentForPath(const PathString &path,
399 catalog::DirectoryEntry *dirent) {
400 uint64_t live_inode = 0;
401 if (!file_system_->IsNfsSource())
402 live_inode = mount_point_->inode_tracker()->FindInode(path);
403
404 LogCvmfs(kLogCvmfs, kLogDebug,
405 "GetDirentForPath: live inode for %s: %" PRIu64, path.c_str(),
406 live_inode);
407
408 const shash::Md5 md5path(path.GetChars(), path.GetLength());
409 if (mount_point_->md5path_cache()->Lookup(md5path, dirent)) {
410 if (dirent->GetSpecial() == catalog::kDirentNegative)
411 return 0;
412 // We may have initially stored the entry with an old inode in the
413 // md5path cache and now should update it with the new one.
414 if (!file_system_->IsNfsSource() && (live_inode != 0))
415 dirent->set_inode(live_inode);
416 return 1;
417 }
418
419 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
420
421 // Look up the path in the catalog. TODO: avoid computing the md5 twice
422 bool retval;
423 retval = catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent);
424 if (retval) {
425 if (file_system_->IsNfsSource()) {
426 dirent->set_inode(file_system_->nfs_maps()->GetInode(path));
427 } else if (live_inode != 0) {
428 dirent->set_inode(live_inode);
429 if (FixupOpenInode(path, dirent)) {
430 LogCvmfs(kLogCvmfs, kLogDebug,
431 "content of %s change, replacing inode %" PRIu64
432 " --> %" PRIu64,
433 path.c_str(), live_inode, dirent->inode());
434 return live_inode;
435 // Do not populate the md5path cache until the inode tracker is fixed
436 }
437 }
438 mount_point_->md5path_cache()->Insert(md5path, *dirent);
439 return 1;
440 }
441
442 LogCvmfs(kLogCvmfs, kLogDebug, "GetDirentForPath, no entry");
443 // Only insert ENOENT results into negative cache. Otherwise it was an
444 // error loading nested catalogs
445 if (dirent->GetSpecial() == catalog::kDirentNegative)
446 mount_point_->md5path_cache()->InsertNegative(md5path);
447 return 0;
448 }
449
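// A minimal usage sketch of the return value contract described above,
// assuming a PathString `path` and a catalog::DirectoryEntry `dirent`:
//
//   const uint64_t ret = GetDirentForPath(path, &dirent);
//   if (ret == 0) {
//     // not found, or catalog load error (check dirent.GetSpecial())
//   } else if (ret == 1) {
//     // found; dirent.inode() is the live (or NFS-mapped) inode
//   } else {
//     // ret is the stale live inode; dirent.inode() comes from the current
//     // generation (see FixupOpenInode above)
//   }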
450
451 static bool GetPathForInode(const fuse_ino_t ino, PathString *path) {
452 // Check the path cache first
453 if (mount_point_->path_cache()->Lookup(ino, path))
454 return true;
455
456 if (file_system_->IsNfsSource()) {
457 // NFS mode, just a lookup
458 LogCvmfs(kLogCvmfs, kLogDebug, "MISS %lu - lookup in NFS maps", ino);
459 if (file_system_->nfs_maps()->GetPath(ino, path)) {
460 mount_point_->path_cache()->Insert(ino, *path);
461 return true;
462 }
463 return false;
464 }
465
466 if (ino == mount_point_->catalog_mgr()->GetRootInode())
467 return true;
468
469 LogCvmfs(kLogCvmfs, kLogDebug, "MISS %lu - looking in inode tracker", ino);
470 glue::InodeEx inode_ex(ino, glue::InodeEx::kUnknownType);
471 const bool retval = mount_point_->inode_tracker()->FindPath(&inode_ex, path);
472
473 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
474 "GetPathForInode: Race condition? "
475 "Inode not found in inode tracker at path %s",
476 path->c_str())) {
477 return false;
478 }
479
480
481 mount_point_->path_cache()->Insert(ino, *path);
482 return true;
483 }
484
485 static void DoTraceInode(const int event,
486 fuse_ino_t ino,
487 const std::string &msg) {
488 PathString path;
489 const bool found = GetPathForInode(ino, &path);
490 if (!found) {
491 LogCvmfs(kLogCvmfs, kLogDebug,
492 "Tracing: Could not find path for inode %" PRIu64, uint64_t(ino));
493 mount_point_->tracer()->Trace(event, PathString("@UNKNOWN"), msg);
494 } else {
495 mount_point_->tracer()->Trace(event, path, msg);
496 }
497 }
498
499 static void inline TraceInode(const int event,
500 fuse_ino_t ino,
501 const std::string &msg) {
502 if (mount_point_->tracer()->IsActive())
503 DoTraceInode(event, ino, msg);
504 }
505
506 /**
507 * Find the inode number of a file name in a directory given by inode.
508 * This or getattr is called as a kind of prerequisite to every operation.
509 * We do check catalog TTL here (and reload, if necessary).
510 */
511 static void cvmfs_lookup(fuse_req_t req, fuse_ino_t parent, const char *name) {
512 const HighPrecisionTimer guard_timer(file_system_->hist_fs_lookup());
513
514 perf::Inc(file_system_->n_fs_lookup());
515 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
516 FuseInterruptCue ic(&req);
517 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
518 &ic);
519 fuse_remounter_->TryFinish();
520
521 fuse_remounter_->fence()->Enter();
522 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
523
524 const fuse_ino_t parent_fuse = parent;
525 parent = catalog_mgr->MangleInode(parent);
526 LogCvmfs(kLogCvmfs, kLogDebug,
527 "cvmfs_lookup in parent inode: %" PRIu64 " for name: %s",
528 uint64_t(parent), name);
529
530 PathString path;
531 PathString parent_path;
532 uint64_t live_inode = 0;
533 catalog::DirectoryEntry dirent;
534 struct fuse_entry_param result;
535
536 memset(&result, 0, sizeof(result));
537 const double timeout = GetKcacheTimeout();
538 result.attr_timeout = timeout;
539 result.entry_timeout = timeout;
540
541 // Special NFS lookups: . and ..
542 if ((strcmp(name, ".") == 0) || (strcmp(name, "..") == 0)) {
543 if (GetDirentForInode(parent, &dirent)) {
544 if (strcmp(name, ".") == 0) {
545 goto lookup_reply_positive;
546 } else {
547 // Lookup for ".."
548 if (dirent.inode() == catalog_mgr->GetRootInode()) {
549 dirent.set_inode(1);
550 goto lookup_reply_positive;
551 }
552 if (!GetPathForInode(parent, &parent_path))
553 goto lookup_reply_negative;
554 if (GetDirentForPath(GetParentPath(parent_path), &dirent) > 0)
555 goto lookup_reply_positive;
556 }
557 }
558 // No entry for "." or no entry for ".."
559 if (dirent.GetSpecial() == catalog::kDirentNegative)
560 goto lookup_reply_negative;
561 else
562 goto lookup_reply_error;
563 assert(false);
564 }
565
566 if (!GetPathForInode(parent, &parent_path)) {
567 LogCvmfs(kLogCvmfs, kLogDebug, "no path for parent inode found");
568 goto lookup_reply_negative;
569 }
570
571 path.Assign(parent_path);
572 path.Append("/", 1);
573 path.Append(name, strlen(name));
574 live_inode = GetDirentForPath(path, &dirent);
575 if (live_inode == 0) {
576 if (dirent.GetSpecial() == catalog::kDirentNegative)
577 goto lookup_reply_negative;
578 else
579 goto lookup_reply_error;
580 }
581
582 lookup_reply_positive:
583 mount_point_->tracer()->Trace(Tracer::kEventLookup, path, "lookup()");
584 if (!file_system_->IsNfsSource()) {
585 if (live_inode > 1) {
586 // live inode is stale (open file), we replace it
587 assert(dirent.IsRegular());
588 assert(dirent.inode() != live_inode);
589
590 // The new inode is put in the tracker with refcounter == 0
591 const bool replaced = mount_point_->inode_tracker()->ReplaceInode(
592 live_inode, glue::InodeEx(dirent.inode(), dirent.mode()));
593 if (replaced)
594 perf::Inc(file_system_->n_fs_inode_replace());
595 }
596 mount_point_->inode_tracker()->VfsGet(
597 glue::InodeEx(dirent.inode(), dirent.mode()), path);
598 }
599 // We do _not_ track (and evict) positive replies; among other things, test
600 // 076 fails with the following line uncommented
601 //
602 // WARNING! ENABLING THIS BREAKS ANY TYPE OF MOUNTPOINT POINTING TO THIS INODE
603 //
604 // only safe if fuse_expire_entry is available
605 if (mount_point_->fuse_expire_entry()
606 || (mount_point_->cache_symlinks() && dirent.IsLink())) {
607 LogCvmfs(kLogCache, kLogDebug, "Dentry to evict: %s", name);
608 mount_point_->dentry_tracker()->Add(parent_fuse, name,
609 static_cast<uint64_t>(timeout));
610 }
611
612 fuse_remounter_->fence()->Leave();
613 result.ino = dirent.inode();
614 result.attr = dirent.GetStatStructure();
615 fuse_reply_entry(req, &result);
616 return;
617
618 lookup_reply_negative:
619 mount_point_->tracer()->Trace(Tracer::kEventLookup, path,
620 "lookup()-NOTFOUND");
621 // Will be a no-op if there is no fuse cache eviction
622 mount_point_->dentry_tracker()->Add(parent_fuse, name, uint64_t(timeout));
623 fuse_remounter_->fence()->Leave();
624 perf::Inc(file_system_->n_fs_lookup_negative());
625 result.ino = 0;
626 fuse_reply_entry(req, &result);
627 return;
628
629 lookup_reply_error:
630 mount_point_->tracer()->Trace(Tracer::kEventLookup, path,
631 "lookup()-NOTFOUND");
632 fuse_remounter_->fence()->Leave();
633
634 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
635 "EIO (01): lookup failed for %s", name);
636 perf::Inc(file_system_->n_eio_total());
637 perf::Inc(file_system_->n_eio_01());
638
639 fuse_reply_err(req, EIO);
640 }
641
642
643 /**
644 * Drops nlookup references to an inode in the inode and page cache trackers.
645 */
646 static void cvmfs_forget(fuse_req_t req,
647 fuse_ino_t ino,
648 #if CVMFS_USE_LIBFUSE == 2
649 unsigned long nlookup // NOLINT
650 #else
651 uint64_t nlookup
652 #endif
653 ) {
654 const HighPrecisionTimer guard_timer(file_system_->hist_fs_forget());
655
656 perf::Inc(file_system_->n_fs_forget());
657
658 // The libfuse high-level library does the same
659 if (ino == FUSE_ROOT_ID) {
660 fuse_reply_none(req);
661 return;
662 }
663
664 // Ensure that we don't need to call catalog_mgr()->MangleInode(ino)
665 assert(ino > mount_point_->catalog_mgr()->kInodeOffset);
666
667 LogCvmfs(kLogCvmfs, kLogDebug, "forget on inode %" PRIu64 " by %" PRIu64,
668 uint64_t(ino), nlookup);
669
670 if (!file_system_->IsNfsSource()) {
671 const bool removed = mount_point_->inode_tracker()->GetVfsPutRaii().VfsPut(
672 ino, nlookup);
673 if (removed)
674 mount_point_->page_cache_tracker()->GetEvictRaii().Evict(ino);
675 }
676
677 fuse_reply_none(req);
678 }
679
680
681 #if (FUSE_VERSION >= 29)
682 static void cvmfs_forget_multi(fuse_req_t req,
683 size_t count,
684 struct fuse_forget_data *forgets) {
685 const HighPrecisionTimer guard_timer(file_system_->hist_fs_forget_multi());
686
687 perf::Xadd(file_system_->n_fs_forget(), count);
688 if (file_system_->IsNfsSource()) {
689 fuse_reply_none(req);
690 return;
691 }
692
693 {
694 glue::InodeTracker::VfsPutRaii vfs_put_raii = mount_point_->inode_tracker()
695 ->GetVfsPutRaii();
696 glue::PageCacheTracker::EvictRaii
697 evict_raii = mount_point_->page_cache_tracker()->GetEvictRaii();
698 for (size_t i = 0; i < count; ++i) {
699 if (forgets[i].ino == FUSE_ROOT_ID) {
700 continue;
701 }
702
703 // Ensure that we don't need to call catalog_mgr()->MangleInode(ino)
704 assert(forgets[i].ino > mount_point_->catalog_mgr()->kInodeOffset);
705 LogCvmfs(kLogCvmfs, kLogDebug, "forget on inode %" PRIu64 " by %" PRIu64,
706 forgets[i].ino, forgets[i].nlookup);
707
708 const bool removed = vfs_put_raii.VfsPut(forgets[i].ino,
709 forgets[i].nlookup);
710 if (removed)
711 evict_raii.Evict(forgets[i].ino);
712 }
713 }
714
715 fuse_reply_none(req);
716 }
717 #endif // FUSE_VERSION >= 29
718
719
720 /**
721 * Looks into dirent to decide if this is an EIO negative reply or an
722 * ENOENT negative reply. We do not need to store the reply in the negative
723 * cache tracker because ReplyNegative is called on inode queries. Inodes,
724 * however, change anyway when a new catalog is applied.
725 */
726 static void ReplyNegative(const catalog::DirectoryEntry &dirent,
727 fuse_req_t req) {
728 if (dirent.GetSpecial() == catalog::kDirentNegative) {
729 fuse_reply_err(req, ENOENT);
730 } else {
731 const char *name = dirent.name().c_str();
732 const char *link = dirent.symlink().c_str();
733
734 LogCvmfs(
735 kLogCvmfs, kLogDebug | kLogSyslogErr,
736 "EIO (02): CVMFS-specific metadata not found for name=%s symlink=%s",
737 name ? name : "<unset>", link ? link : "<unset>");
738
739 perf::Inc(file_system_->n_eio_total());
740 perf::Inc(file_system_->n_eio_02());
741 fuse_reply_err(req, EIO);
742 }
743 }
744
745
746 /**
747 * Transform a cvmfs dirent into a struct stat.
748 */
749 static void cvmfs_getattr(fuse_req_t req, fuse_ino_t ino,
750 struct fuse_file_info *fi) {
751 const HighPrecisionTimer guard_timer(file_system_->hist_fs_getattr());
752
753 perf::Inc(file_system_->n_fs_stat());
754 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
755 FuseInterruptCue ic(&req);
756 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
757 &ic);
758 fuse_remounter_->TryFinish();
759
760 fuse_remounter_->fence()->Enter();
761 ino = mount_point_->catalog_mgr()->MangleInode(ino);
762 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_getattr (stat) for inode: %" PRIu64,
763 uint64_t(ino));
764
765 if (!CheckVoms(*fuse_ctx)) {
766 fuse_remounter_->fence()->Leave();
767 fuse_reply_err(req, EACCES);
768 return;
769 }
770 catalog::DirectoryEntry dirent;
771 const bool found = GetDirentForInode(ino, &dirent);
772 TraceInode(Tracer::kEventGetAttr, ino, "getattr()");
773 if ((!found && (dirent.inode() == ino)) || MayBeInPageCacheTracker(dirent)) {
774 // Serve retired inode from page cache tracker; even if we find it in the
775 // catalog, we replace the dirent by the page cache tracker version to
776 // not confuse open file handles
777 LogCvmfs(kLogCvmfs, kLogDebug,
778 "cvmfs_getattr %" PRIu64 " "
779 "served from page cache tracker",
780 ino);
781 shash::Any hash;
782 struct stat info;
783 const bool is_open = mount_point_->page_cache_tracker()->GetInfoIfOpen(
784 ino, &hash, &info);
785 if (is_open) {
786 fuse_remounter_->fence()->Leave();
787 if (found && HasDifferentContent(dirent, hash, info)) {
788 // We should from now on provide the new inode information instead
789 // of the stale one. To this end, we need to invalidate the dentry to
790 // trigger a fresh LOOKUP call
791 uint64_t parent_ino;
792 NameString name;
793 if (mount_point_->inode_tracker()->FindDentry(dirent.inode(),
794 &parent_ino, &name)) {
795 fuse_remounter_->InvalidateDentry(parent_ino, name);
796 }
797 perf::Inc(file_system_->n_fs_stat_stale());
798 }
799 fuse_reply_attr(req, &info, GetKcacheTimeout());
800 return;
801 }
802 }
803 fuse_remounter_->fence()->Leave();
804
805 if (!found) {
806 ReplyNegative(dirent, req);
807 return;
808 }
809
810 struct stat const info = dirent.GetStatStructure();
811
812 fuse_reply_attr(req, &info, GetKcacheTimeout());
813 }
814
815
816 /**
817 * Reads a symlink from the catalog. Environment variables are expanded.
818 */
819 static void cvmfs_readlink(fuse_req_t req, fuse_ino_t ino) {
820 const HighPrecisionTimer guard_timer(file_system_->hist_fs_readlink());
821
822 perf::Inc(file_system_->n_fs_readlink());
823 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
824 FuseInterruptCue ic(&req);
825 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
826 &ic);
827
828 fuse_remounter_->fence()->Enter();
829 ino = mount_point_->catalog_mgr()->MangleInode(ino);
830 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_readlink on inode: %" PRIu64,
831 uint64_t(ino));
832
833 catalog::DirectoryEntry dirent;
834 const bool found = GetDirentForInode(ino, &dirent);
835 TraceInode(Tracer::kEventReadlink, ino, "readlink()");
836 fuse_remounter_->fence()->Leave();
837
838 if (!found) {
839 ReplyNegative(dirent, req);
840 return;
841 }
842
843 if (!dirent.IsLink()) {
844 fuse_reply_err(req, EINVAL);
845 return;
846 }
847
848 fuse_reply_readlink(req, dirent.symlink().c_str());
849 }
850
851
852 static void AddToDirListing(const fuse_req_t req, const char *name,
853 const struct stat *stat_info,
854 BigVector<char> *listing) {
855 LogCvmfs(kLogCvmfs, kLogDebug, "Add to listing: %s, inode %" PRIu64, name,
856 uint64_t(stat_info->st_ino));
857 size_t remaining_size = listing->capacity() - listing->size();
858 const size_t entry_size = fuse_add_direntry(req, NULL, 0, name, stat_info, 0);
859
860 while (entry_size > remaining_size) {
861 listing->DoubleCapacity();
862 remaining_size = listing->capacity() - listing->size();
863 }
864
865 char *buffer;
866 bool large_alloc;
867 listing->ShareBuffer(&buffer, &large_alloc);
868 fuse_add_direntry(req, buffer + listing->size(), remaining_size, name,
869 stat_info, listing->size() + entry_size);
870 listing->SetSize(listing->size() + entry_size);
871 }
872
873
874 /**
875 * Open a directory for listing.
876 */
877 static void cvmfs_opendir(fuse_req_t req, fuse_ino_t ino,
878 struct fuse_file_info *fi) {
879 const HighPrecisionTimer guard_timer(file_system_->hist_fs_opendir());
880
881 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
882 FuseInterruptCue ic(&req);
883 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
884 &ic);
885 fuse_remounter_->TryFinish();
886
887 fuse_remounter_->fence()->Enter();
888 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
889 ino = catalog_mgr->MangleInode(ino);
890 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_opendir on inode: %" PRIu64,
891 uint64_t(ino));
892 if (!CheckVoms(*fuse_ctx)) {
893 fuse_remounter_->fence()->Leave();
894 fuse_reply_err(req, EACCES);
895 return;
896 }
897
898 TraceInode(Tracer::kEventOpenDir, ino, "opendir()");
899 PathString path;
900 catalog::DirectoryEntry d;
901 bool found = GetPathForInode(ino, &path);
902 if (!found) {
903 fuse_remounter_->fence()->Leave();
904 fuse_reply_err(req, ENOENT);
905 return;
906 }
907 found = GetDirentForInode(ino, &d);
908
909 if (!found) {
910 fuse_remounter_->fence()->Leave();
911 ReplyNegative(d, req);
912 return;
913 }
914 if (!d.IsDirectory()) {
915 fuse_remounter_->fence()->Leave();
916 fuse_reply_err(req, ENOTDIR);
917 return;
918 }
919
920 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_opendir on inode: %" PRIu64 ", path %s",
921 uint64_t(ino), path.c_str());
922
923 // Build listing
924 BigVector<char> fuse_listing(512);
925
926 // Add current directory link
927 struct stat info;
928 info = d.GetStatStructure();
929 AddToDirListing(req, ".", &info, &fuse_listing);
930
931 // Add parent directory link
932 catalog::DirectoryEntry p;
933 if (d.inode() != catalog_mgr->GetRootInode()
934 && (GetDirentForPath(GetParentPath(path), &p) > 0)) {
935 info = p.GetStatStructure();
936 AddToDirListing(req, "..", &info, &fuse_listing);
937 }
938
939 // Add all names
940 catalog::StatEntryList listing_from_catalog;
941 const bool retval = catalog_mgr->ListingStat(path, &listing_from_catalog);
942
943 if (!retval) {
944 fuse_remounter_->fence()->Leave();
945 fuse_listing.Clear(); // Buffer is shared, empty manually
946
947 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
948 "EIO (03): failed to open directory at %s", path.c_str());
949 perf::Inc(file_system_->n_eio_total());
950 perf::Inc(file_system_->n_eio_03());
951 fuse_reply_err(req, EIO);
952 return;
953 }
954 for (unsigned i = 0; i < listing_from_catalog.size(); ++i) {
955 // Fix inodes
956 PathString entry_path;
957 entry_path.Assign(path);
958 entry_path.Append("/", 1);
959 entry_path.Append(listing_from_catalog.AtPtr(i)->name.GetChars(),
960 listing_from_catalog.AtPtr(i)->name.GetLength());
961
962 catalog::DirectoryEntry entry_dirent;
963 if (!GetDirentForPath(entry_path, &entry_dirent)) {
964 LogCvmfs(kLogCvmfs, kLogDebug, "listing entry %s vanished, skipping",
965 entry_path.c_str());
966 continue;
967 }
968
969 struct stat fixed_info = listing_from_catalog.AtPtr(i)->info;
970 fixed_info.st_ino = entry_dirent.inode();
971 AddToDirListing(req, listing_from_catalog.AtPtr(i)->name.c_str(),
972 &fixed_info, &fuse_listing);
973 }
974 fuse_remounter_->fence()->Leave();
975
976 DirectoryListing stream_listing;
977 stream_listing.size = fuse_listing.size();
978 stream_listing.capacity = fuse_listing.capacity();
979 bool large_alloc;
980 fuse_listing.ShareBuffer(&stream_listing.buffer, &large_alloc);
981 if (large_alloc)
982 stream_listing.capacity = 0;
983
984 // Save the directory listing and return a handle to the listing
985 {
986 const MutexLockGuard m(&lock_directory_handles_);
987 LogCvmfs(kLogCvmfs, kLogDebug,
988 "linking directory handle %lu to dir inode: %" PRIu64,
989 next_directory_handle_, uint64_t(ino));
990 (*directory_handles_)[next_directory_handle_] = stream_listing;
991 fi->fh = next_directory_handle_;
992 ++next_directory_handle_;
993 }
994 perf::Inc(file_system_->n_fs_dir_open());
995 perf::Inc(file_system_->no_open_dirs());
996
997 #if (FUSE_VERSION >= 30)
998 #ifdef CVMFS_ENABLE_FUSE3_CACHE_READDIR
999 // This affects only reads on the same open directory handle (e.g. multiple
1000 // reads with rewinddir() between them). A new opendir on the same directory
1001 // will trigger readdir calls independently of this setting.
1002 fi->cache_readdir = 1;
1003 #endif
1004 #endif
1005 fuse_reply_open(req, fi);
1006 }
1007
1008
1009 /**
1010 * Release a directory.
1011 */
1012 static void cvmfs_releasedir(fuse_req_t req, fuse_ino_t ino,
1013 struct fuse_file_info *fi) {
1014 const HighPrecisionTimer guard_timer(file_system_->hist_fs_releasedir());
1015
1016 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1017 LogCvmfs(kLogCvmfs, kLogDebug,
1018 "cvmfs_releasedir on inode %" PRIu64 ", handle %lu", uint64_t(ino),
1019 fi->fh);
1020
1021 int reply = 0;
1022
1023 {
1024 const MutexLockGuard m(&lock_directory_handles_);
1025 const DirectoryHandles::iterator iter_handle = directory_handles_->find(
1026 fi->fh);
1027 if (iter_handle != directory_handles_->end()) {
1028 if (iter_handle->second.capacity == 0)
1029 smunmap(iter_handle->second.buffer);
1030 else
1031 free(iter_handle->second.buffer);
1032 directory_handles_->erase(iter_handle);
1033 perf::Dec(file_system_->no_open_dirs());
1034 } else {
1035 reply = EINVAL;
1036 }
1037 }
1038
1039 fuse_reply_err(req, reply);
1040 }
1041
1042
1043 /**
1044 * Very large directory listings have to be sent in slices.
1045 */
1046 static void ReplyBufferSlice(const fuse_req_t req, const char *buffer,
1047 const size_t buffer_size, const off_t offset,
1048 const size_t max_size) {
1049 if (offset < static_cast<int>(buffer_size)) {
1050 fuse_reply_buf(
1051 req, buffer + offset,
1052 std::min(static_cast<size_t>(buffer_size - offset), max_size));
1053 } else {
1054 fuse_reply_buf(req, NULL, 0);
1055 }
1056 }
1057
1058
1059 /**
1060 * Read the directory listing.
1061 */
1062 static void cvmfs_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
1063 off_t off, struct fuse_file_info *fi) {
1064 const HighPrecisionTimer guard_timer(file_system_->hist_fs_readdir());
1065
1066 LogCvmfs(kLogCvmfs, kLogDebug,
1067 "cvmfs_readdir on inode %" PRIu64
1068 " reading %lu bytes from offset %ld",
1069 static_cast<uint64_t>(mount_point_->catalog_mgr()->MangleInode(ino)),
1070 size, off);
1071
1072 DirectoryListing listing;
1073
1074 const MutexLockGuard m(&lock_directory_handles_);
1075 const DirectoryHandles::const_iterator iter_handle = directory_handles_->find(
1076 fi->fh);
1077 if (iter_handle != directory_handles_->end()) {
1078 listing = iter_handle->second;
1079
1080 ReplyBufferSlice(req, listing.buffer, listing.size, off, size);
1081 return;
1082 }
1083
1084 fuse_reply_err(req, EINVAL);
1085 }
1086
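// A brief sketch of the directory handle lifecycle implemented above:
// cvmfs_opendir() materializes the complete listing once and registers it
// under a numeric handle, cvmfs_readdir() serves slices of that buffer, and
// cvmfs_releasedir() frees it again:
//
//   fi->fh = next_directory_handle_++;            // opendir: store listing
//   ReplyBufferSlice(req, buf, size, off, max);   // readdir: send one slice
//   directory_handles_->erase(iter_handle);       // releasedir: drop buffer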
1087 static void FillOpenFlags(const glue::PageCacheTracker::OpenDirectives od,
1088 struct fuse_file_info *fi) {
1089 assert(!TestBit(glue::PageCacheTracker::kBitDirectIo, fi->fh));
1090 fi->keep_cache = od.keep_cache;
1091 fi->direct_io = od.direct_io;
1092 if (fi->direct_io)
1093 SetBit(glue::PageCacheTracker::kBitDirectIo, &fi->fh);
1094 }
1095
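// A minimal sketch of the file handle convention used by the open/read/release
// handlers below: regular files store a non-negative cache manager fd in
// fi->fh, chunked files store the negated chunk handle, and direct I/O is
// flagged via PageCacheTracker::kBitDirectIo. Decoding works as in
// cvmfs_read()/cvmfs_release():
//
//   const int64_t fd = static_cast<int64_t>(fi->fh);
//   uint64_t abs_fd = (fd < 0) ? -fd : fd;   // cache fd or chunk handle
//   ClearBit(glue::PageCacheTracker::kBitDirectIo, &abs_fd);
//   const bool is_chunked = (fd < 0);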
1096
1097 #ifdef __APPLE__
1098 // On macOS, xattr on a symlink opens and closes the file (with O_SYMLINK)
1099 // around the actual getxattr call. In order to not run into an I/O error
1100 // we use a special file handle for symlinks, from which one cannot read.
1101 static const uint64_t kFileHandleIgnore = static_cast<uint64_t>(2) << 60;
1102 #endif
1103
1104 /**
1105 * Open a file from cache. If necessary, file is downloaded first.
1106 *
1107 * \return Read-only file descriptor in fi->fh or kChunkedFileHandle for
1108 * chunked files
1109 */
1110 static void cvmfs_open(fuse_req_t req, fuse_ino_t ino,
1111 struct fuse_file_info *fi) {
1112 const HighPrecisionTimer guard_timer(file_system_->hist_fs_open());
1113
1114 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1115 FuseInterruptCue ic(&req);
1116 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
1117 &ic);
1118 fuse_remounter_->fence()->Enter();
1119 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1120 ino = catalog_mgr->MangleInode(ino);
1121 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_open on inode: %" PRIu64,
1122 uint64_t(ino));
1123
1124 int fd = -1;
1125 catalog::DirectoryEntry dirent;
1126 PathString path;
1127
1128 bool found = GetPathForInode(ino, &path);
1129 if (!found) {
1130 fuse_remounter_->fence()->Leave();
1131 fuse_reply_err(req, ENOENT);
1132 return;
1133 }
1134 found = GetDirentForInode(ino, &dirent);
1135 if (!found) {
1136 fuse_remounter_->fence()->Leave();
1137 ReplyNegative(dirent, req);
1138 return;
1139 }
1140
1141 if (!CheckVoms(*fuse_ctx)) {
1142 fuse_remounter_->fence()->Leave();
1143 fuse_reply_err(req, EACCES);
1144 return;
1145 }
1146
1147 mount_point_->tracer()->Trace(Tracer::kEventOpen, path, "open()");
1148 // Don't check. Either done by the OS or one wants to purposefully work
1149 // around wrong open flags
1150 // if ((fi->flags & 3) != O_RDONLY) {
1151 // fuse_reply_err(req, EROFS);
1152 // return;
1153 // }
1154 #ifdef __APPLE__
1155 if ((fi->flags & O_SHLOCK) || (fi->flags & O_EXLOCK)) {
1156 fuse_remounter_->fence()->Leave();
1157 fuse_reply_err(req, EOPNOTSUPP);
1158 return;
1159 }
1160 if (fi->flags & O_SYMLINK) {
1161 fuse_remounter_->fence()->Leave();
1162 fi->fh = kFileHandleIgnore;
1163 fuse_reply_open(req, fi);
1164 return;
1165 }
1166 #endif
1167 if (fi->flags & O_EXCL) {
1168 fuse_remounter_->fence()->Leave();
1169 fuse_reply_err(req, EEXIST);
1170 return;
1171 }
1172
1173 perf::Inc(file_system_->n_fs_open()); // Count actual open / fetch operations
1174
1175 glue::PageCacheTracker::OpenDirectives open_directives;
1176 if (!dirent.IsChunkedFile()) {
1177 if (dirent.IsDirectIo()) {
1178 open_directives = mount_point_->page_cache_tracker()->OpenDirect();
1179 } else {
1180 open_directives = mount_point_->page_cache_tracker()->Open(
1181 ino, dirent.checksum(), dirent.GetStatStructure());
1182 }
1183 fuse_remounter_->fence()->Leave();
1184 } else {
1185 LogCvmfs(kLogCvmfs, kLogDebug,
1186 "chunked file %s opened (download delayed to read() call)",
1187 path.c_str());
1188
1189 if (!IncAndCheckNoOpenFiles()) {
1190 perf::Dec(file_system_->no_open_files());
1191 fuse_remounter_->fence()->Leave();
1192 LogCvmfs(kLogCvmfs, kLogSyslogErr, "open file descriptor limit exceeded");
1193 fuse_reply_err(req, EMFILE);
1194 perf::Inc(file_system_->n_emfile());
1195 return;
1196 }
1197
1198 // Figure out unique inode from annotated catalog
1199 // TODO(jblomer): we only need to lookup if the inode is not from the
1200 // current generation
1201 catalog::DirectoryEntry dirent_origin;
1202 if (!catalog_mgr->LookupPath(path, catalog::kLookupDefault,
1203 &dirent_origin)) {
1204 fuse_remounter_->fence()->Leave();
1205 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1206 "chunked file %s vanished unexpectedly", path.c_str());
1207 fuse_reply_err(req, ENOENT);
1208 return;
1209 }
1210 const uint64_t unique_inode = dirent_origin.inode();
1211
1212 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1213 chunk_tables->Lock();
1214 if (!chunk_tables->inode2chunks.Contains(unique_inode)) {
1215 chunk_tables->Unlock();
1216
1217 // Retrieve File chunks from the catalog
1218 UniquePtr<FileChunkList> chunks(new FileChunkList());
1219 if (!catalog_mgr->ListFileChunks(path, dirent.hash_algorithm(),
1220 chunks.weak_ref())
1221 || chunks->IsEmpty()) {
1222 fuse_remounter_->fence()->Leave();
1223 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1224 "EIO (04): failed to open file %s. "
1225 "It is marked as 'chunked', but no chunks found.",
1226 path.c_str());
1227 perf::Inc(file_system_->n_eio_total());
1228 perf::Inc(file_system_->n_eio_04());
1229 fuse_reply_err(req, EIO);
1230 return;
1231 }
1232 fuse_remounter_->fence()->Leave();
1233
1234 chunk_tables->Lock();
1235 // Check again to avoid race
1236 if (!chunk_tables->inode2chunks.Contains(unique_inode)) {
1237 chunk_tables->inode2chunks.Insert(
1238 unique_inode, FileChunkReflist(chunks.Release(), path,
1239 dirent.compression_algorithm(),
1240 dirent.IsExternalFile()));
1241 chunk_tables->inode2references.Insert(unique_inode, 1);
1242 } else {
1243 uint32_t refctr;
1244 const bool retval = chunk_tables->inode2references.Lookup(unique_inode,
1245 &refctr);
1246 assert(retval);
1247 chunk_tables->inode2references.Insert(unique_inode, refctr + 1);
1248 }
1249 } else {
1250 fuse_remounter_->fence()->Leave();
1251 uint32_t refctr;
1252 const bool retval = chunk_tables->inode2references.Lookup(unique_inode,
1253 &refctr);
1254 assert(retval);
1255 chunk_tables->inode2references.Insert(unique_inode, refctr + 1);
1256 }
1257
1258 // Update the chunk handle list
1259 LogCvmfs(kLogCvmfs, kLogDebug,
1260 "linking chunk handle %lu to unique inode: %" PRIu64,
1261 chunk_tables->next_handle, uint64_t(unique_inode));
1262 chunk_tables->handle2fd.Insert(chunk_tables->next_handle, ChunkFd());
1263 chunk_tables->handle2uniqino.Insert(chunk_tables->next_handle,
1264 unique_inode);
1265
1266 // Generate artificial content hash as hash over chunk hashes
1267 // TODO(jblomer): we may want to cache the result in the chunk tables
1268 FileChunkReflist chunk_reflist;
1269 const bool retval = chunk_tables->inode2chunks.Lookup(unique_inode,
1270 &chunk_reflist);
1271 assert(retval);
1272
1273 fi->fh = chunk_tables->next_handle;
1274 if (dirent.IsDirectIo()) {
1275 open_directives = mount_point_->page_cache_tracker()->OpenDirect();
1276 } else {
1277 open_directives = mount_point_->page_cache_tracker()->Open(
1278 ino, chunk_reflist.HashChunkList(), dirent.GetStatStructure());
1279 }
1280 FillOpenFlags(open_directives, fi);
1281 fi->fh = static_cast<uint64_t>(-static_cast<int64_t>(fi->fh));
1282 ++chunk_tables->next_handle;
1283 chunk_tables->Unlock();
1284
1285 fuse_reply_open(req, fi);
1286 return;
1287 }
1288
1289 Fetcher *this_fetcher = dirent.IsExternalFile()
1290 ? mount_point_->external_fetcher()
1291 : mount_point_->fetcher();
1292 CacheManager::Label label;
1293 label.path = path.ToString();
1294 label.size = dirent.size();
1295 label.zip_algorithm = dirent.compression_algorithm();
1296 if (mount_point_->catalog_mgr()->volatile_flag())
1297 label.flags |= CacheManager::kLabelVolatile;
1298 if (dirent.IsExternalFile())
1299 label.flags |= CacheManager::kLabelExternal;
1300 fd = this_fetcher->Fetch(
1301 CacheManager::LabeledObject(dirent.checksum(), label));
1302
1303 if (fd >= 0) {
1304 if (IncAndCheckNoOpenFiles()) {
1305 LogCvmfs(kLogCvmfs, kLogDebug, "file %s opened (fd %d)", path.c_str(),
1306 fd);
1307 fi->fh = fd;
1308 FillOpenFlags(open_directives, fi);
1309 fuse_reply_open(req, fi);
1310 return;
1311 } else {
1312 if (file_system_->cache_mgr()->Close(fd) == 0)
1313 perf::Dec(file_system_->no_open_files());
1314 LogCvmfs(kLogCvmfs, kLogSyslogErr, "open file descriptor limit exceeded");
1315 // not returning an fd, so close the page cache tracker entry if required
1316 if (!dirent.IsDirectIo()) {
1317 fuse_remounter_->fence()->Enter();
1318 mount_point_->page_cache_tracker()->Close(ino);
1319 fuse_remounter_->fence()->Leave();
1320 }
1321 fuse_reply_err(req, EMFILE);
1322 perf::Inc(file_system_->n_emfile());
1323 return;
1324 }
1325 assert(false);
1326 }
1327
1328 // fd < 0
1329 // the download has failed. Close the page cache tracker entry if required
1330 if (!dirent.IsDirectIo()) {
1331 fuse_remounter_->fence()->Enter();
1332 mount_point_->page_cache_tracker()->Close(ino);
1333 fuse_remounter_->fence()->Leave();
1334 }
1335
1336 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1337 "failed to open inode: %" PRIu64 ", CAS key %s, error code %d",
1338 uint64_t(ino), dirent.checksum().ToString().c_str(), errno);
1339 if (errno == EMFILE) {
1340 LogCvmfs(kLogCvmfs, kLogSyslogErr, "open file descriptor limit exceeded");
1341 fuse_reply_err(req, EMFILE);
1342 perf::Inc(file_system_->n_emfile());
1343 return;
1344 }
1345
1346 mount_point_->backoff_throttle()->Throttle();
1347
1348 mount_point_->file_system()->io_error_info()->AddIoError();
1349 if (EIO == errno || EIO == -fd) {
1350 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1351 "EIO (06): Failed to open file %s", path.c_str());
1352 perf::Inc(file_system_->n_eio_total());
1353 perf::Inc(file_system_->n_eio_06());
1354 }
1355
1356 fuse_reply_err(req, -fd);
1357 }
1358
1359
1360 /**
1361 * Redirected to pread into cache.
1362 */
1363 static void cvmfs_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1364 struct fuse_file_info *fi) {
1365 const HighPrecisionTimer guard_timer(file_system_->hist_fs_read());
1366
1367 LogCvmfs(kLogCvmfs, kLogDebug,
1368 "cvmfs_read inode: %" PRIu64 " reading %lu bytes from offset %ld "
1369 "fd %lu",
1370 uint64_t(mount_point_->catalog_mgr()->MangleInode(ino)), size, off,
1371 fi->fh);
1372 perf::Inc(file_system_->n_fs_read());
1373
1374 #ifdef __APPLE__
1375 if (fi->fh == kFileHandleIgnore) {
1376 fuse_reply_err(req, EBADF);
1377 return;
1378 }
1379 #endif
1380
1381 // Get data chunk (<=128k guaranteed by Fuse)
1382 char *data = static_cast<char *>(alloca(size));
1383 unsigned int overall_bytes_fetched = 0;
1384
1385 const int64_t fd = static_cast<int64_t>(fi->fh);
1386 uint64_t abs_fd = (fd < 0) ? -fd : fd;
1387 ClearBit(glue::PageCacheTracker::kBitDirectIo, &abs_fd);
1388
1389 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1390 FuseInterruptCue ic(&req);
1391 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
1392 &ic);
1393
1394 // Do we have a chunked file?
1395 if (fd < 0) {
1396 const uint64_t chunk_handle = abs_fd;
1397 uint64_t unique_inode;
1398 ChunkFd chunk_fd;
1399 FileChunkReflist chunks;
1400 bool retval;
1401
1402 // Fetch unique inode, chunk list and file descriptor
1403 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1404 chunk_tables->Lock();
1405 retval = chunk_tables->handle2uniqino.Lookup(chunk_handle, &unique_inode);
1406 if (!retval) {
1407 LogCvmfs(kLogCvmfs, kLogDebug, "no unique inode, fall back to fuse ino");
1408 unique_inode = ino;
1409 }
1410 retval = chunk_tables->inode2chunks.Lookup(unique_inode, &chunks);
1411 assert(retval);
1412 chunk_tables->Unlock();
1413
1414 unsigned chunk_idx = chunks.FindChunkIdx(off);
1415
1416 // Lock chunk handle
1417 pthread_mutex_t *handle_lock = chunk_tables->Handle2Lock(chunk_handle);
1418 const MutexLockGuard m(handle_lock);
1419 chunk_tables->Lock();
1420 retval = chunk_tables->handle2fd.Lookup(chunk_handle, &chunk_fd);
1421 assert(retval);
1422 chunk_tables->Unlock();
1423
1424 // Fetch all needed chunks and read the requested data
1425 off_t offset_in_chunk = off - chunks.list->AtPtr(chunk_idx)->offset();
1426 do {
1427 // Open file descriptor to chunk
1428 if ((chunk_fd.fd == -1) || (chunk_fd.chunk_idx != chunk_idx)) {
1429 if (chunk_fd.fd != -1)
1430 file_system_->cache_mgr()->Close(chunk_fd.fd);
1431 Fetcher *this_fetcher = chunks.external_data
1432 ? mount_point_->external_fetcher()
1433 : mount_point_->fetcher();
1434 CacheManager::Label label;
1435 label.path = chunks.path.ToString();
1436 label.size = chunks.list->AtPtr(chunk_idx)->size();
1437 label.zip_algorithm = chunks.compression_alg;
1438 label.flags |= CacheManager::kLabelChunked;
1439 if (mount_point_->catalog_mgr()->volatile_flag())
1440 label.flags |= CacheManager::kLabelVolatile;
1441 if (chunks.external_data) {
1442 label.flags |= CacheManager::kLabelExternal;
1443 label.range_offset = chunks.list->AtPtr(chunk_idx)->offset();
1444 }
1445 chunk_fd.fd = this_fetcher->Fetch(CacheManager::LabeledObject(
1446 chunks.list->AtPtr(chunk_idx)->content_hash(), label));
1447 if (chunk_fd.fd < 0) {
1448 chunk_fd.fd = -1;
1449 chunk_tables->Lock();
1450 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1451 chunk_tables->Unlock();
1452
1453 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1454 "EIO (05): Failed to fetch chunk %d from file %s", chunk_idx,
1455 chunks.path.ToString().c_str());
1456 perf::Inc(file_system_->n_eio_total());
1457 perf::Inc(file_system_->n_eio_05());
1458 fuse_reply_err(req, EIO);
1459 return;
1460 }
1461 chunk_fd.chunk_idx = chunk_idx;
1462 }
1463
1464 LogCvmfs(kLogCvmfs, kLogDebug, "reading from chunk fd %d", chunk_fd.fd);
1465 // Read data from chunk
1466 const size_t bytes_to_read = size - overall_bytes_fetched;
1467 const size_t remaining_bytes_in_chunk = chunks.list->AtPtr(chunk_idx)
1468 ->size()
1469 - offset_in_chunk;
1470 const size_t bytes_to_read_in_chunk = std::min(bytes_to_read,
1471 remaining_bytes_in_chunk);
1472 const int64_t bytes_fetched = file_system_->cache_mgr()->Pread(
1473 chunk_fd.fd,
1474 data + overall_bytes_fetched,
1475 bytes_to_read_in_chunk,
1476 offset_in_chunk);
1477
1478 if (bytes_fetched < 0) {
1479 LogCvmfs(kLogCvmfs, kLogSyslogErr, "read err no %" PRId64 " (%s)",
1480 bytes_fetched, chunks.path.ToString().c_str());
1481 chunk_tables->Lock();
1482 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1483 chunk_tables->Unlock();
1484 if (EIO == errno || EIO == -bytes_fetched) {
1485 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1486 "EIO (07): Failed to read chunk %d from file %s", chunk_idx,
1487 chunks.path.ToString().c_str());
1488 perf::Inc(file_system_->n_eio_total());
1489 perf::Inc(file_system_->n_eio_07());
1490 }
1491 fuse_reply_err(req, -bytes_fetched);
1492 return;
1493 }
1494 overall_bytes_fetched += bytes_fetched;
1495
1496 // Proceed to the next chunk to keep on reading data
1497 ++chunk_idx;
1498 offset_in_chunk = 0;
1499 } while ((overall_bytes_fetched < size)
1500 && (chunk_idx < chunks.list->size()));
1501
1502 // Update chunk file descriptor
1503 chunk_tables->Lock();
1504 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1505 chunk_tables->Unlock();
1506 LogCvmfs(kLogCvmfs, kLogDebug, "released chunk file descriptor %d",
1507 chunk_fd.fd);
1508 } else {
1509 const int64_t nbytes = file_system_->cache_mgr()->Pread(abs_fd, data, size,
1510 off);
1511 if (nbytes < 0) {
1512 if (EIO == errno || EIO == -nbytes) {
1513 PathString path;
1514 const bool found = GetPathForInode(ino, &path);
1515 if (found) {
1516 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1517 "EIO (08): Failed to read file %s", path.ToString().c_str());
1518 } else {
1519 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1520 "EIO (08): Failed to read from %s - <unknown inode>",
1521 path.ToString().c_str());
1522 }
1523 perf::Inc(file_system_->n_eio_total());
1524 perf::Inc(file_system_->n_eio_08());
1525 }
1526 fuse_reply_err(req, -nbytes);
1527 return;
1528 }
1529 overall_bytes_fetched = nbytes;
1530 }
1531
1532 // Push it to user
1533 fuse_reply_buf(req, data, overall_bytes_fetched);
1534 LogCvmfs(kLogCvmfs, kLogDebug, "pushed %d bytes to user",
1535 overall_bytes_fetched);
1536 }
1537
1538
1539 /**
1540 * File close operation, redirected into cache.
1541 */
1542 static void cvmfs_release(fuse_req_t req, fuse_ino_t ino,
1543 struct fuse_file_info *fi) {
1544 const HighPrecisionTimer guard_timer(file_system_->hist_fs_release());
1545
1546 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1547 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_release on inode: %" PRIu64,
1548 uint64_t(ino));
1549
1550 #ifdef __APPLE__
1551 if (fi->fh == kFileHandleIgnore) {
1552 fuse_reply_err(req, 0);
1553 return;
1554 }
1555 #endif
1556
1557 const int64_t fd = static_cast<int64_t>(fi->fh);
1558 uint64_t abs_fd = (fd < 0) ? -fd : fd;
1559 if (!TestBit(glue::PageCacheTracker::kBitDirectIo, abs_fd)) {
1560 mount_point_->page_cache_tracker()->Close(ino);
1561 }
1562 ClearBit(glue::PageCacheTracker::kBitDirectIo, &abs_fd);
1563
1564 // do we have a chunked file?
1565 if (fd < 0) {
1566 const uint64_t chunk_handle = abs_fd;
1567 LogCvmfs(kLogCvmfs, kLogDebug, "releasing chunk handle %" PRIu64,
1568 chunk_handle);
1569 uint64_t unique_inode;
1570 ChunkFd chunk_fd;
1571 const FileChunkReflist chunks;
1572 uint32_t refctr;
1573 bool retval;
1574
1575 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1576 chunk_tables->Lock();
1577 retval = chunk_tables->handle2uniqino.Lookup(chunk_handle, &unique_inode);
1578 if (!retval) {
1579 LogCvmfs(kLogCvmfs, kLogDebug, "no unique inode, fall back to fuse ino");
1580 unique_inode = ino;
1581 } else {
1582 chunk_tables->handle2uniqino.Erase(chunk_handle);
1583 }
1584 retval = chunk_tables->handle2fd.Lookup(chunk_handle, &chunk_fd);
1585 assert(retval);
1586 chunk_tables->handle2fd.Erase(chunk_handle);
1587
1588 retval = chunk_tables->inode2references.Lookup(unique_inode, &refctr);
1589 assert(retval);
1590 refctr--;
1591 if (refctr == 0) {
1592 LogCvmfs(kLogCvmfs, kLogDebug, "releasing chunk list for inode %" PRIu64,
1593 uint64_t(unique_inode));
1594 FileChunkReflist to_delete;
1595 retval = chunk_tables->inode2chunks.Lookup(unique_inode, &to_delete);
1596 assert(retval);
1597 chunk_tables->inode2references.Erase(unique_inode);
1598 chunk_tables->inode2chunks.Erase(unique_inode);
1599 delete to_delete.list;
1600 } else {
1601 chunk_tables->inode2references.Insert(unique_inode, refctr);
1602 }
1603 chunk_tables->Unlock();
1604
1605 if (chunk_fd.fd != -1)
1606 file_system_->cache_mgr()->Close(chunk_fd.fd);
1607 perf::Dec(file_system_->no_open_files());
1608 } else {
1609 if (file_system_->cache_mgr()->Close(abs_fd) == 0) {
1610 perf::Dec(file_system_->no_open_files());
1611 }
1612 }
1613 fuse_reply_err(req, 0);
1614 }
1615
1616 /**
1617 * Returns information about a mounted filesystem. In this case it returns
1618 * information about the local cache occupancy of cvmfs.
1619 *
1620 * Note: if the elements of struct statvfs *info are set to 0, the mount
1621 * is ignored by the command line tool "df".
1622 */
1623 static void cvmfs_statfs(fuse_req_t req, fuse_ino_t ino) {
1624 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1625 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_statfs on inode: %" PRIu64,
1626 uint64_t(ino));
1627
1628 TraceInode(Tracer::kEventStatFs, ino, "statfs()");
1629
1630 perf::Inc(file_system_->n_fs_statfs());
1631
1632 // Unmanaged cache (no lock needed - statfs is never modified)
1633 if (!file_system_->cache_mgr()->quota_mgr()->HasCapability(
1634 QuotaManager::kCapIntrospectSize)) {
1635 LogCvmfs(kLogCvmfs, kLogDebug, "QuotaManager does not support statfs");
1636 fuse_reply_statfs(req, (mount_point_->statfs_cache()->info()));
1637 return;
1638 }
1639
1640 const MutexLockGuard m(mount_point_->statfs_cache()->lock());
1641
1642 const uint64_t deadline = *mount_point_->statfs_cache()->expiry_deadline();
1643 struct statvfs *info = mount_point_->statfs_cache()->info();
1644
1645 // cached version still valid
1646 if (platform_monotonic_time() < deadline) {
1647 perf::Inc(file_system_->n_fs_statfs_cached());
1648 fuse_reply_statfs(req, info);
1649 return;
1650 }
1651
1652 uint64_t available = 0;
1653 const uint64_t size = file_system_->cache_mgr()->quota_mgr()->GetSize();
1654 const uint64_t
1655 capacity = file_system_->cache_mgr()->quota_mgr()->GetCapacity();
1656 // Fuse/OS X doesn't like values < 512
1657 info->f_bsize = info->f_frsize = 512;
1658
1659 if (capacity == (uint64_t)(-1)) {
1660 // Unknown capacity, set capacity = size
1661 info->f_blocks = size / info->f_bsize;
1662 } else {
1663 // Take values from LRU module
1664 info->f_blocks = capacity / info->f_bsize;
1665 available = capacity - size;
1666 }
1667
1668 info->f_bfree = info->f_bavail = available / info->f_bsize;
1669
1670 // Inodes / entries
1671 fuse_remounter_->fence()->Enter();
1672 const uint64_t all_inodes = mount_point_->catalog_mgr()->all_inodes();
1673 const uint64_t loaded_inode = mount_point_->catalog_mgr()->loaded_inodes();
1674 info->f_files = all_inodes;
1675 info->f_ffree = info->f_favail = all_inodes - loaded_inode;
1676 fuse_remounter_->fence()->Leave();
1677
1678 *mount_point_->statfs_cache()
1679 ->expiry_deadline() = platform_monotonic_time()
1680 + mount_point_->statfs_cache()->cache_timeout();
1681
1682 fuse_reply_statfs(req, info);
1683 }
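// --- Editor's illustration (not part of cvmfs.cc; numbers are made up): the
// statvfs fields filled above map directly to what "df" prints. Assuming a
// managed cache with a 10 GiB capacity of which 4 GiB are currently used:
//   f_bsize = f_frsize = 512
//   f_blocks = 10 GiB / 512 = 20971520            (total blocks)
//   f_bfree  = f_bavail = 6 GiB / 512 = 12582912  (free blocks)
// df then reports a 10 GiB filesystem that is 40% used, while the inode
// columns (f_files / f_ffree) show the total number of catalog entries versus
// the entries not yet loaded.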
1684
1685 #ifdef __APPLE__
1686 static void cvmfs_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
1687 size_t size, uint32_t position)
1688 #else
1689 static void cvmfs_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
1690 size_t size)
1691 #endif
1692 {
1693 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1694 FuseInterruptCue ic(&req);
1695 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
1696 &ic);
1697
1698 fuse_remounter_->fence()->Enter();
1699 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1700 ino = catalog_mgr->MangleInode(ino);
1701 LogCvmfs(kLogCvmfs, kLogDebug,
1702 "cvmfs_getxattr on inode: %" PRIu64 " for xattr: %s", uint64_t(ino),
1703 name);
1704 if (!CheckVoms(*fuse_ctx)) {
1705 fuse_remounter_->fence()->Leave();
1706 fuse_reply_err(req, EACCES);
1707 return;
1708 }
1709 TraceInode(Tracer::kEventGetXAttr, ino, "getxattr()");
1710
1711 vector<string> tokens_mode_machine = SplitString(name, '~');
1712 vector<string> tokens_mode_human = SplitString(name, '@');
1713
1714 int32_t attr_req_page = 0;
1715 MagicXattrMode xattr_mode = kXattrMachineMode;
1716 string attr;
1717
1718 bool attr_req_is_valid = false;
1719 const sanitizer::PositiveIntegerSanitizer page_num_sanitizer;
1720
1721 if (tokens_mode_human.size() > 1) {
1722 const std::string token = tokens_mode_human[tokens_mode_human.size() - 1];
1723 if (token == "?") {
1724 attr_req_is_valid = true;
1725 attr_req_page = -1;
1726 } else {
1727 if (page_num_sanitizer.IsValid(token)) {
1728 attr_req_is_valid = true;
1729 attr_req_page = static_cast<int32_t>(String2Uint64(token));
1730 }
1731 }
1732 xattr_mode = kXattrHumanMode;
1733 attr = tokens_mode_human[0];
1734 } else if (tokens_mode_machine.size() > 1) {
1735 const std::string
1736 token = tokens_mode_machine[tokens_mode_machine.size() - 1];
1737 if (token == "?") {
1738 attr_req_is_valid = true;
1739 attr_req_page = -1;
1740 } else {
1741 if (page_num_sanitizer.IsValid(token)) {
1742 attr_req_is_valid = true;
1743 attr_req_page = static_cast<int32_t>(String2Uint64(token));
1744 }
1745 }
1746 xattr_mode = kXattrMachineMode;
1747 attr = tokens_mode_machine[0];
1748
1749 } else {
1750 attr_req_is_valid = true;
1751 attr = tokens_mode_machine[0];
1752 }
1753
1754 if (!attr_req_is_valid) {
1755 fuse_remounter_->fence()->Leave();
1756 fuse_reply_err(req, ENODATA);
1757 return;
1758 }
1759
1760 catalog::DirectoryEntry d;
1761 const bool found = GetDirentForInode(ino, &d);
1762
1763 if (!found) {
1764 fuse_remounter_->fence()->Leave();
1765 ReplyNegative(d, req);
1766 return;
1767 }
1768
1769 bool retval;
1770 XattrList xattrs;
1771 PathString path;
1772 retval = GetPathForInode(ino, &path);
1773
1774 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1775 "cvmfs_getxattr: Race condition? "
1776 "GetPathForInode did not succeed for path %s "
1777 "(path might not have been set)",
1778 path.c_str())) {
1779 fuse_remounter_->fence()->Leave();
1780 fuse_reply_err(req, ESTALE);
1781 return;
1782 }
1783
1784 if (d.IsLink()) {
1785 const catalog::LookupOptions
1786 lookup_options = static_cast<catalog::LookupOptions>(
1787 catalog::kLookupDefault | catalog::kLookupRawSymlink);
1788 catalog::DirectoryEntry raw_symlink;
1789 retval = catalog_mgr->LookupPath(path, lookup_options, &raw_symlink);
1790
1791 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1792 "cvmfs_getxattr: Race condition? "
1793 "LookupPath did not succeed for path %s",
1794 path.c_str())) {
1795 fuse_remounter_->fence()->Leave();
1796 fuse_reply_err(req, ESTALE);
1797 return;
1798 }
1799
1800 d.set_symlink(raw_symlink.symlink());
1801 }
1802 if (d.HasXattrs()) {
1803 retval = catalog_mgr->LookupXattrs(path, &xattrs);
1804
1805 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1806 "cvmfs_getxattr: Race condition? "
1807 "LookupXattrs did not succeed for path %s",
1808 path.c_str())) {
1809 fuse_remounter_->fence()->Leave();
1810 fuse_reply_err(req, ESTALE);
1811 return;
1812 }
1813 }
1814
1815 bool magic_xattr_success = true;
1816 const MagicXattrRAIIWrapper magic_xattr(
1817 mount_point_->magic_xattr_mgr()->GetLocked(attr, path, &d));
1818 if (!magic_xattr.IsNull()) {
1819 magic_xattr_success = magic_xattr->PrepareValueFencedProtected(
1820 fuse_ctx->gid);
1821 }
1822
1823 fuse_remounter_->fence()->Leave();
1824
1825 if (!magic_xattr_success) {
1826 fuse_reply_err(req, ENOATTR);
1827 return;
1828 }
1829
1830 std::pair<bool, std::string> attribute_result;
1831
1832 if (!magic_xattr.IsNull()) {
1833 attribute_result = magic_xattr->GetValue(attr_req_page, xattr_mode);
1834 } else {
1835 if (!xattrs.Get(attr, &attribute_result.second)) {
1836 fuse_reply_err(req, ENOATTR);
1837 return;
1838 }
1839 attribute_result.first = true;
1840 }
1841
1842 if (!attribute_result.first) {
1843 fuse_reply_err(req, ENODATA);
1844 } else if (size == 0) {
1845 fuse_reply_xattr(req, attribute_result.second.length());
1846 } else if (size >= attribute_result.second.length()) {
1847 fuse_reply_buf(req, &attribute_result.second[0],
1848 attribute_result.second.length());
1849 } else {
1850 fuse_reply_err(req, ERANGE);
1851 }
1852 }
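// --- Editor's usage sketch (not part of cvmfs.cc; the mount point
// /cvmfs/example.org is hypothetical): the suffix parsing above lets clients
// pick the output mode and page of a magic xattr:
//   getfattr -n user.expires /cvmfs/example.org     # no suffix: machine mode
//   getfattr -n user.expires@0 /cvmfs/example.org   # human-readable, page 0
//   getfattr -n user.expires~? /cvmfs/example.org   # machine mode; '?' asks
//                                                   # for paging info
//                                                   # (GetValue() sees page -1)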
1853
1854
1855 static void cvmfs_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size) {
1856 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1857 FuseInterruptCue ic(&req);
1858 const ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid,
1859 &ic);
1860
1861 fuse_remounter_->fence()->Enter();
1862 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1863 ino = catalog_mgr->MangleInode(ino);
1864 TraceInode(Tracer::kEventListAttr, ino, "listxattr()");
1865 LogCvmfs(kLogCvmfs, kLogDebug,
1866 "cvmfs_listxattr on inode: %" PRIu64 ", size %zu [visibility %d]",
1867 uint64_t(ino), size, mount_point_->magic_xattr_mgr()->visibility());
1868
1869 catalog::DirectoryEntry d;
1870 const bool found = GetDirentForInode(ino, &d);
1871 XattrList xattrs;
1872 if (d.HasXattrs()) {
1873 PathString path;
1874 bool retval = GetPathForInode(ino, &path);
1875
1876 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1877 "cvmfs_listxattr: Race condition? "
1878 "GetPathForInode did not succeed for ino %lu",
1879 ino)) {
1880 fuse_remounter_->fence()->Leave();
1881 fuse_reply_err(req, ESTALE);
1882 return;
1883 }
1884
1885 retval = catalog_mgr->LookupXattrs(path, &xattrs);
1886 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1887 "cvmfs_listxattr: Race condition? "
1888 "LookupXattrs did not succeed for ino %lu",
1889 ino)) {
1890 fuse_remounter_->fence()->Leave();
1891 fuse_reply_err(req, ESTALE);
1892 return;
1893 }
1894 }
1895 fuse_remounter_->fence()->Leave();
1896
1897 if (!found) {
1898 ReplyNegative(d, req);
1899 return;
1900 }
1901
1902 string attribute_list;
1903 attribute_list = mount_point_->magic_xattr_mgr()->GetListString(&d);
1904 attribute_list += xattrs.ListKeysPosix(attribute_list);
1905
1906 if (size == 0) {
1907 fuse_reply_xattr(req, attribute_list.length());
1908 } else if (size >= attribute_list.length()) {
1909 if (attribute_list.empty())
1910 fuse_reply_buf(req, NULL, 0);
1911 else
1912 fuse_reply_buf(req, &attribute_list[0], attribute_list.length());
1913 } else {
1914 fuse_reply_err(req, ERANGE);
1915 }
1916 }
1917
1918 bool Evict(const string &path) {
1919 catalog::DirectoryEntry dirent;
1920 fuse_remounter_->fence()->Enter();
1921 const bool found = (GetDirentForPath(PathString(path), &dirent) > 0);
1922
1923 if (!found || !dirent.IsRegular()) {
1924 fuse_remounter_->fence()->Leave();
1925 return false;
1926 }
1927
1928 if (!dirent.IsChunkedFile()) {
1929 fuse_remounter_->fence()->Leave();
1930 } else {
1931 FileChunkList chunks;
1932 mount_point_->catalog_mgr()->ListFileChunks(
1933 PathString(path), dirent.hash_algorithm(), &chunks);
1934 fuse_remounter_->fence()->Leave();
1935 for (unsigned i = 0; i < chunks.size(); ++i) {
1936 file_system_->cache_mgr()->quota_mgr()->Remove(
1937 chunks.AtPtr(i)->content_hash());
1938 }
1939 }
1940 file_system_->cache_mgr()->quota_mgr()->Remove(dirent.checksum());
1941 return true;
1942 }
1943
1944
1945 bool Pin(const string &path) {
1946 catalog::DirectoryEntry dirent;
1947 fuse_remounter_->fence()->Enter();
1948 const bool found = (GetDirentForPath(PathString(path), &dirent) > 0);
1949 if (!found || !dirent.IsRegular()) {
1950 fuse_remounter_->fence()->Leave();
1951 return false;
1952 }
1953
1954 Fetcher *this_fetcher = dirent.IsExternalFile()
1955 ? mount_point_->external_fetcher()
1956 : mount_point_->fetcher();
1957
1958 if (!dirent.IsChunkedFile()) {
1959 fuse_remounter_->fence()->Leave();
1960 } else {
1961 FileChunkList chunks;
1962 mount_point_->catalog_mgr()->ListFileChunks(
1963 PathString(path), dirent.hash_algorithm(), &chunks);
1964 fuse_remounter_->fence()->Leave();
1965 for (unsigned i = 0; i < chunks.size(); ++i) {
1966 const bool retval = file_system_->cache_mgr()->quota_mgr()->Pin(
1967 chunks.AtPtr(i)->content_hash(), chunks.AtPtr(i)->size(),
1968 "Part of " + path, false);
1969 if (!retval)
1970 return false;
1971 int fd = -1;
1972 CacheManager::Label label;
1973 label.path = path;
1974 label.size = chunks.AtPtr(i)->size();
1975 label.zip_algorithm = dirent.compression_algorithm();
1976 label.flags |= CacheManager::kLabelPinned;
1977 label.flags |= CacheManager::kLabelChunked;
1978 if (dirent.IsExternalFile()) {
1979 label.flags |= CacheManager::kLabelExternal;
1980 label.range_offset = chunks.AtPtr(i)->offset();
1981 }
1982 fd = this_fetcher->Fetch(
1983 CacheManager::LabeledObject(chunks.AtPtr(i)->content_hash(), label));
1984 if (fd < 0) {
1985 return false;
1986 }
1987 file_system_->cache_mgr()->Close(fd);
1988 }
1989 return true;
1990 }
1991
1992 const bool retval = file_system_->cache_mgr()->quota_mgr()->Pin(
1993 dirent.checksum(), dirent.size(), path, false);
1994 if (!retval)
1995 return false;
1996 CacheManager::Label label;
1997 label.flags = CacheManager::kLabelPinned;
1998 label.size = dirent.size();
1999 label.path = path;
2000 label.zip_algorithm = dirent.compression_algorithm();
2001 const int fd = this_fetcher->Fetch(
2002 CacheManager::LabeledObject(dirent.checksum(), label));
2003 if (fd < 0) {
2004 return false;
2005 }
2006 file_system_->cache_mgr()->Close(fd);
2007 return true;
2008 }
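// --- Editor's note: Evict() and Pin() above take repository paths (rooted at
// the mount point) and talk directly to the quota manager and fetcher. A
// hypothetical caller, e.g. the talk/command interface, would use them as:
//   cvmfs::Pin("/software/v1/setup.sh");    // fetch all (chunked) data and
//                                           // pin it in the local cache
//   cvmfs::Evict("/software/v1/setup.sh");  // drop the cached data again
// Both return false for paths that are missing or not regular files.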
2009
2010
2011 /**
2012 * Do after-daemon() initialization
2013 */
2014 static void cvmfs_init(void *userdata, struct fuse_conn_info *conn) {
2015 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_init");
2016
2017 // NFS support
2018 #ifdef CVMFS_NFS_SUPPORT
2019 conn->want |= FUSE_CAP_EXPORT_SUPPORT;
2020 #endif
2021
2022 if (mount_point_->enforce_acls()) {
2023 #ifdef FUSE_CAP_POSIX_ACL
2024 if ((conn->capable & FUSE_CAP_POSIX_ACL) == 0) {
2025 PANIC(kLogDebug | kLogSyslogErr,
2026 "FUSE: ACL support requested but missing fuse kernel support, "
2027 "aborting");
2028 }
2029 conn->want |= FUSE_CAP_POSIX_ACL;
2030 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslog, "enforcing ACLs");
2031 #else
2032 PANIC(kLogDebug | kLogSyslogErr,
2033 "FUSE: ACL support requested but not available in this version of "
2034 "libfuse %d, aborting",
2035 FUSE_VERSION);
2036 #endif
2037 }
2038
2039 if (mount_point_->cache_symlinks()) {
2040 #ifdef FUSE_CAP_CACHE_SYMLINKS
2041 if ((conn->capable & FUSE_CAP_CACHE_SYMLINKS) == FUSE_CAP_CACHE_SYMLINKS) {
2042 conn->want |= FUSE_CAP_CACHE_SYMLINKS;
2043 LogCvmfs(kLogCvmfs, kLogDebug, "FUSE: Enable symlink caching");
2044 #ifndef FUSE_CAP_EXPIRE_ONLY
2045 LogCvmfs(
2046 kLogCvmfs, kLogDebug | kLogSyslogWarn,
2047 "FUSE: Symlink caching enabled but no support for fuse_expire_entry. "
2048 "Symlinks will be cached but mountpoints on top of symlinks will "
2049 "break! "
2050 "Current libfuse %d is too old; required: libfuse >= 3.16, "
2051 "kernel >= 6.2-rc1",
2052 FUSE_VERSION);
2053 #endif
2054 } else {
2055 mount_point_->DisableCacheSymlinks();
2056 LogCvmfs(
2057 kLogCvmfs, kLogDebug | kLogSyslogWarn,
2058 "FUSE: Symlink caching requested but missing fuse kernel support, "
2059 "falling back to no caching");
2060 }
2061 #else
2062 mount_point_->DisableCacheSymlinks();
2063 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2064 "FUSE: Symlink caching requested but missing libfuse support, "
2065 "falling back to no caching. Current libfuse %d",
2066 FUSE_VERSION);
2067 #endif
2068 }
2069
2070 #ifdef FUSE_CAP_EXPIRE_ONLY
2071 if ((conn->capable & FUSE_CAP_EXPIRE_ONLY) == FUSE_CAP_EXPIRE_ONLY
2072 && FUSE_VERSION >= FUSE_MAKE_VERSION(3, 16)) {
2073 mount_point_->EnableFuseExpireEntry();
2074 LogCvmfs(kLogCvmfs, kLogDebug, "FUSE: Enable fuse_expire_entry");
2075 } else if (mount_point_->cache_symlinks()) {
2076 LogCvmfs(
2077 kLogCvmfs, kLogDebug | kLogSyslogWarn,
2078 "FUSE: Symlink caching enabled but no support for fuse_expire_entry. "
2079 "Symlinks will be cached but mountpoints on top of symlinks will "
2080 "break! "
2081 "Current libfuse %d; required: libfuse >= 3.16, kernel >= 6.2-rc1",
2082 FUSE_VERSION);
2083 }
2084 #endif
2085 }
2086
2087 static void cvmfs_destroy(void *unused __attribute__((unused))) {
2088 // The debug log is already closed at this point
2089 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_destroy");
2090 }
2091
2092 /**
2093  * Puts the callback functions into a single structure
2094 */
2095 static void SetCvmfsOperations(struct fuse_lowlevel_ops *cvmfs_operations) {
2096 memset(cvmfs_operations, 0, sizeof(*cvmfs_operations));
2097
2098 // Init/Fini
2099 cvmfs_operations->init = cvmfs_init;
2100 cvmfs_operations->destroy = cvmfs_destroy;
2101
2102 cvmfs_operations->lookup = cvmfs_lookup;
2103 cvmfs_operations->getattr = cvmfs_getattr;
2104 cvmfs_operations->readlink = cvmfs_readlink;
2105 cvmfs_operations->open = cvmfs_open;
2106 cvmfs_operations->read = cvmfs_read;
2107 cvmfs_operations->release = cvmfs_release;
2108 cvmfs_operations->opendir = cvmfs_opendir;
2109 cvmfs_operations->readdir = cvmfs_readdir;
2110 cvmfs_operations->releasedir = cvmfs_releasedir;
2111 cvmfs_operations->statfs = cvmfs_statfs;
2112 cvmfs_operations->getxattr = cvmfs_getxattr;
2113 cvmfs_operations->listxattr = cvmfs_listxattr;
2114 cvmfs_operations->forget = cvmfs_forget;
2115 #if (FUSE_VERSION >= 29)
2116 cvmfs_operations->forget_multi = cvmfs_forget_multi;
2117 #endif
2118 }
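// --- Editor's note: everything not assigned above stays NULL from the
// memset(), i.e. no write-type callbacks (setattr, write, mknod, ...) are
// registered. libfuse then typically answers such requests with ENOSYS,
// which matches the read-only nature of this client.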
2119
2120 // Called by cvmfs_talk when switching into read-only cache mode
2121 void UnregisterQuotaListener() {
2122 if (cvmfs::unpin_listener_) {
2123 quota::UnregisterListener(cvmfs::unpin_listener_);
2124 cvmfs::unpin_listener_ = NULL;
2125 }
2126 if (cvmfs::watchdog_listener_) {
2127 quota::UnregisterListener(cvmfs::watchdog_listener_);
2128 cvmfs::watchdog_listener_ = NULL;
2129 }
2130 }
2131
2132 bool SendFuseFd(const std::string &socket_path) {
2133 int fuse_fd;
2134 #if (FUSE_VERSION >= 30)
2135 fuse_fd = fuse_session_fd(*reinterpret_cast<struct fuse_session **>(
2136 loader_exports_->fuse_channel_or_session));
2137 #else
2138 fuse_fd = fuse_chan_fd(*reinterpret_cast<struct fuse_chan **>(
2139 loader_exports_->fuse_channel_or_session));
2140 #endif
2141 assert(fuse_fd >= 0);
2142 const int sock_fd = ConnectSocket(socket_path);
2143 if (sock_fd < 0) {
2144 LogCvmfs(kLogCvmfs, kLogDebug, "cannot connect to socket %s: %d",
2145 socket_path.c_str(), errno);
2146 return false;
2147 }
2148 const bool retval = SendFd2Socket(sock_fd, fuse_fd);
2149 close(sock_fd);
2150 return retval;
2151 }
2152
2153 } // namespace cvmfs
2154
2155
2156 string *g_boot_error = NULL;
2157
2158 __attribute__((
2159 visibility("default"))) loader::CvmfsExports *g_cvmfs_exports = NULL;
2160
2161 /**
2162 * Begin section of cvmfs.cc-specific magic extended attributes
2163 */
2164
2165 class ExpiresMagicXattr : public BaseMagicXattr {
2166 time_t catalogs_valid_until_;
2167
2168 virtual bool PrepareValueFenced() {
2169 catalogs_valid_until_ = cvmfs::fuse_remounter_->catalogs_valid_until();
2170 return true;
2171 }
2172
2173 virtual void FinalizeValue() {
2174 if (catalogs_valid_until_ == MountPoint::kIndefiniteDeadline) {
2175 result_pages_.push_back("never (fixed root catalog)");
2176 return;
2177 } else {
2178 const time_t now = time(NULL);
2179 result_pages_.push_back(StringifyInt((catalogs_valid_until_ - now) / 60));
2180 }
2181 }
2182 };
2183
2184 class InodeMaxMagicXattr : public BaseMagicXattr {
2185 virtual void FinalizeValue() {
2186 result_pages_.push_back(StringifyInt(
2187 cvmfs::inode_generation_info_.inode_generation
2188 + xattr_mgr_->mount_point()->catalog_mgr()->inode_gauge()));
2189 }
2190 };
2191
2192 class MaxFdMagicXattr : public BaseMagicXattr {
2193 virtual void FinalizeValue() {
2194 result_pages_.push_back(
2195 StringifyInt(cvmfs::max_open_files_ - cvmfs::kNumReservedFd));
2196 }
2197 };
2198
2199 class PidMagicXattr : public BaseMagicXattr {
2200 virtual void FinalizeValue() {
2201 result_pages_.push_back(StringifyInt(cvmfs::pid_));
2202 }
2203 };
2204
2205 class UptimeMagicXattr : public BaseMagicXattr {
2206 virtual void FinalizeValue() {
2207 const time_t now = time(NULL);
2208 const uint64_t uptime = now - cvmfs::loader_exports_->boot_time;
2209 result_pages_.push_back(StringifyUint(uptime / 60));
2210 }
2211 };
2212
2213 /**
2214  * Registers the cvmfs.cc-specific magic extended attributes with the mount
2215  * point's magic xattr manager
2216 */
2217 static void RegisterMagicXattrs() {
2218 MagicXattrManager *mgr = cvmfs::mount_point_->magic_xattr_mgr();
2219 mgr->Register("user.expires", new ExpiresMagicXattr());
2220 mgr->Register("user.inode_max", new InodeMaxMagicXattr());
2221 mgr->Register("user.pid", new PidMagicXattr());
2222 mgr->Register("user.maxfd", new MaxFdMagicXattr());
2223 mgr->Register("user.uptime", new UptimeMagicXattr());
2224
2225 mgr->Freeze();
2226 }
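// --- Editor's illustration (hypothetical mount point): once Freeze() has been
// called, the attributes registered above can be queried on any path inside
// the repository, for example:
//   getfattr -n user.pid /cvmfs/example.org      # PID of the cvmfs2 process
//   getfattr -n user.uptime /cvmfs/example.org   # minutes since boot_time
//   getfattr -n user.expires /cvmfs/example.org  # minutes until the root
//                                                # catalog expires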
2227
2228 /**
2229 * Construct a file system but prevent hanging when already mounted. That
2230 * means: at most one "system" mount of any given repository name.
2231 */
2232 static FileSystem *InitSystemFs(const string &mount_path,
2233 const string &fqrn,
2234 FileSystem::FileSystemInfo fs_info) {
2235 fs_info.wait_workspace = false;
2236 FileSystem *file_system = FileSystem::Create(fs_info);
2237
2238 if (file_system->boot_status() == loader::kFailLockWorkspace) {
2239 string fqrn_from_xattr;
2240 const int retval = platform_getxattr(mount_path, "user.fqrn",
2241 &fqrn_from_xattr);
2242 if (!retval) {
2243 // CernVM-FS is not mounted anymore, but another cvmfs process is still in
2244 // its shutdown procedure. Try again and wait for the lock
2245 delete file_system;
2246 fs_info.wait_workspace = true;
2247 file_system = FileSystem::Create(fs_info);
2248 } else {
2249 if (fqrn_from_xattr == fqrn) {
2250 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2251 "repository already mounted on %s", mount_path.c_str());
2252 file_system->set_boot_status(loader::kFailDoubleMount);
2253 } else {
2254 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
2255 "CernVM-FS repository %s already mounted on %s", fqrn.c_str(),
2256 mount_path.c_str());
2257 file_system->set_boot_status(loader::kFailOtherMount);
2258 }
2259 }
2260 }
2261
2262 return file_system;
2263 }
2264
2265
2266 static void InitOptionsMgr(const loader::LoaderExports *loader_exports) {
2267 if (loader_exports->version >= 3 && loader_exports->simple_options_parsing) {
2268 cvmfs::options_mgr_ = new SimpleOptionsParser(
2269 new DefaultOptionsTemplateManager(loader_exports->repository_name));
2270 } else {
2271 cvmfs::options_mgr_ = new BashOptionsManager(
2272 new DefaultOptionsTemplateManager(loader_exports->repository_name));
2273 }
2274
2275 if (loader_exports->config_files != "") {
2276 vector<string> tokens = SplitString(loader_exports->config_files, ':');
2277 for (unsigned i = 0, s = tokens.size(); i < s; ++i) {
2278 cvmfs::options_mgr_->ParsePath(tokens[i], false);
2279 }
2280 } else {
2281 cvmfs::options_mgr_->ParseDefault(loader_exports->repository_name);
2282 }
2283 }
2284
2285
2286 static unsigned CheckMaxOpenFiles() {
2287 static unsigned max_open_files;
2288 static bool already_done = false;
2289
2290 // check number of open files (lazy evaluation)
2291 if (!already_done) {
2292 unsigned soft_limit = 0;
2293 unsigned hard_limit = 0;
2294 GetLimitNoFile(&soft_limit, &hard_limit);
2295
2296 if (soft_limit < cvmfs::kMinOpenFiles) {
2297 LogCvmfs(kLogCvmfs, kLogSyslogWarn | kLogDebug,
2298 "Warning: current limits for number of open files are "
2299 "(%u/%u)\n"
2300 "CernVM-FS is likely to run out of file descriptors, "
2301 "set ulimit -n to at least %u",
2302 soft_limit, hard_limit, cvmfs::kMinOpenFiles);
2303 }
2304 max_open_files = soft_limit;
2305 already_done = true;
2306 }
2307
2308 return max_open_files;
2309 }
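// --- Editor's note: the soft limit examined above is the per-process file
// descriptor limit (presumably RLIMIT_NOFILE behind GetLimitNoFile()). A
// sketch of how an administrator would raise it before mounting, as the
// warning message suggests (65536 is an arbitrary example value):
//   ulimit -n 65536          # interactive shell / init script
// or LimitNOFILE=65536 in a systemd unit that starts the mount.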
2310
2311
2312 static int Init(const loader::LoaderExports *loader_exports) {
2313 g_boot_error = new string("unknown error");
2314 cvmfs::loader_exports_ = loader_exports;
2315
2316 crypto::SetupLibcryptoMt();
2317
2318 InitOptionsMgr(loader_exports);
2319
2320 // We need logging set up before forking the watchdog
2321 FileSystem::SetupLoggingStandalone(*cvmfs::options_mgr_,
2322 loader_exports->repository_name);
2323
2324 // Monitor, check for maximum number of open files
2325 if (cvmfs::UseWatchdog()) {
2326 auto_umount::SetMountpoint(loader_exports->mount_point);
2327 cvmfs::watchdog_ = Watchdog::Create(auto_umount::UmountOnCrash);
2328 if (cvmfs::watchdog_ == NULL) {
2329 *g_boot_error = "failed to initialize watchdog.";
2330 return loader::kFailMonitor;
2331 }
2332 }
2333 cvmfs::max_open_files_ = CheckMaxOpenFiles();
2334
2335 FileSystem::FileSystemInfo fs_info;
2336 fs_info.type = FileSystem::kFsFuse;
2337 fs_info.name = loader_exports->repository_name;
2338 fs_info.exe_path = loader_exports->program_name;
2339 fs_info.options_mgr = cvmfs::options_mgr_;
2340 fs_info.foreground = loader_exports->foreground;
2341 cvmfs::file_system_ = InitSystemFs(loader_exports->mount_point,
2342 loader_exports->repository_name, fs_info);
2343 if (!cvmfs::file_system_->IsValid()) {
2344 *g_boot_error = cvmfs::file_system_->boot_error();
2345 return cvmfs::file_system_->boot_status();
2346 }
2347 if ((cvmfs::file_system_->cache_mgr()->id() == kPosixCacheManager)
2348 && dynamic_cast<PosixCacheManager *>(cvmfs::file_system_->cache_mgr())
2349 ->do_refcount()) {
2350 cvmfs::check_fd_overflow_ = false;
2351 }
2352
2353 cvmfs::mount_point_ = MountPoint::Create(loader_exports->repository_name,
2354 cvmfs::file_system_);
2355 if (!cvmfs::mount_point_->IsValid()) {
2356 *g_boot_error = cvmfs::mount_point_->boot_error();
2357 return cvmfs::mount_point_->boot_status();
2358 }
2359
2360 RegisterMagicXattrs();
2361
2362 cvmfs::directory_handles_ = new cvmfs::DirectoryHandles();
2363 cvmfs::directory_handles_->set_empty_key((uint64_t)(-1));
2364 cvmfs::directory_handles_->set_deleted_key((uint64_t)(-2));
2365
2366 LogCvmfs(kLogCvmfs, kLogDebug, "fuse inode size is %lu bits",
2367 sizeof(fuse_ino_t) * 8);
2368
2369 cvmfs::inode_generation_info_
2370 .initial_revision = cvmfs::mount_point_->catalog_mgr()->GetRevision();
2371 cvmfs::inode_generation_info_.inode_generation = cvmfs::mount_point_
2372 ->inode_annotation()
2373 ->GetGeneration();
2374 LogCvmfs(kLogCvmfs, kLogDebug, "root inode is %" PRIu64,
2375 uint64_t(cvmfs::mount_point_->catalog_mgr()->GetRootInode()));
2376
2377 void **channel_or_session = NULL;
2378 if (loader_exports->version >= 4) {
2379 channel_or_session = loader_exports->fuse_channel_or_session;
2380 }
2381
2382 bool fuse_notify_invalidation = true;
2383 std::string buf;
2384 if (cvmfs::options_mgr_->GetValue("CVMFS_FUSE_NOTIFY_INVALIDATION", &buf)) {
2385 if (!cvmfs::options_mgr_->IsOn(buf)) {
2386 fuse_notify_invalidation = false;
2387 cvmfs::mount_point_->dentry_tracker()->Disable();
2388 }
2389 }
2390 cvmfs::fuse_remounter_ = new FuseRemounter(
2391 cvmfs::mount_point_, &cvmfs::inode_generation_info_, channel_or_session,
2392 fuse_notify_invalidation);
2393
2394 // Control & command interface
2395 cvmfs::talk_mgr_ = TalkManager::Create(
2396 cvmfs::mount_point_->talk_socket_path(),
2397 cvmfs::mount_point_,
2398 cvmfs::fuse_remounter_);
2399 if ((cvmfs::mount_point_->talk_socket_uid() != 0)
2400 || (cvmfs::mount_point_->talk_socket_gid() != 0)) {
2401 const uid_t tgt_uid = cvmfs::mount_point_->talk_socket_uid();
2402 const gid_t tgt_gid = cvmfs::mount_point_->talk_socket_gid();
2403 const int rvi = chown(cvmfs::mount_point_->talk_socket_path().c_str(),
2404 tgt_uid, tgt_gid);
2405 if (rvi != 0) {
2406 *g_boot_error = std::string("failed to set talk socket ownership - ")
2407 + "target " + StringifyInt(tgt_uid) + ":"
2408 + StringifyInt(tgt_gid) + ", user "
2409 + StringifyInt(geteuid()) + ":" + StringifyInt(getegid());
2410 return loader::kFailTalk;
2411 }
2412 }
2413 if (cvmfs::talk_mgr_ == NULL) {
2414 *g_boot_error = "failed to initialize talk socket (" + StringifyInt(errno)
2415 + ")";
2416 return loader::kFailTalk;
2417 }
2418
2419 // Notification system client
2420 {
2421 OptionsManager *options = cvmfs::file_system_->options_mgr();
2422 if (options->IsDefined("CVMFS_NOTIFICATION_SERVER")) {
2423 std::string config;
2424 options->GetValue("CVMFS_NOTIFICATION_SERVER", &config);
2425 const std::string repo_name = cvmfs::mount_point_->fqrn();
2426 cvmfs::notification_client_ = new NotificationClient(
2427 config, repo_name, cvmfs::fuse_remounter_,
2428 cvmfs::mount_point_->download_mgr(),
2429 cvmfs::mount_point_->signature_mgr());
2430 }
2431 }
2432
2433 return loader::kFailOk;
2434 }
2435
2436
2437 /**
2438 * Things that have to be executed after fork() / daemon()
2439 */
2440 static void Spawn() {
2441 // First thing: kick off the watchdog while we still have a single-threaded
2442 // well-defined state
2443 cvmfs::pid_ = getpid();
2444 if (cvmfs::watchdog_) {
2445 cvmfs::watchdog_->Spawn(GetCurrentWorkingDirectory() + "/stacktrace."
2446 + cvmfs::mount_point_->fqrn());
2447 }
2448
2449 cvmfs::fuse_remounter_->Spawn();
2450 if (cvmfs::mount_point_->dentry_tracker()->is_active()) {
2451 cvmfs::mount_point_->dentry_tracker()->SpawnCleaner(
2452 // Usually every minute
2453 static_cast<unsigned int>(cvmfs::mount_point_->kcache_timeout_sec()));
2454 }
2455
2456 cvmfs::mount_point_->download_mgr()->Spawn();
2457 cvmfs::mount_point_->external_download_mgr()->Spawn();
2458 if (cvmfs::mount_point_->resolv_conf_watcher() != NULL) {
2459 cvmfs::mount_point_->resolv_conf_watcher()->Spawn();
2460 }
2461 QuotaManager *quota_mgr = cvmfs::file_system_->cache_mgr()->quota_mgr();
2462 quota_mgr->Spawn();
2463 if (quota_mgr->HasCapability(QuotaManager::kCapListeners)) {
2464 cvmfs::watchdog_listener_ = quota::RegisterWatchdogListener(
2465 quota_mgr, cvmfs::mount_point_->uuid()->uuid() + "-watchdog");
2466 cvmfs::unpin_listener_ = quota::RegisterUnpinListener(
2467 quota_mgr,
2468 cvmfs::mount_point_->catalog_mgr(),
2469 cvmfs::mount_point_->uuid()->uuid() + "-unpin");
2470 }
2471 cvmfs::mount_point_->tracer()->Spawn();
2472 cvmfs::talk_mgr_->Spawn();
2473
2474 if (cvmfs::notification_client_ != NULL) {
2475 cvmfs::notification_client_->Spawn();
2476 }
2477
2478 if (cvmfs::file_system_->nfs_maps() != NULL) {
2479 cvmfs::file_system_->nfs_maps()->Spawn();
2480 }
2481
2482 cvmfs::file_system_->cache_mgr()->Spawn();
2483
2484 if (cvmfs::mount_point_->telemetry_aggr() != NULL) {
2485 cvmfs::mount_point_->telemetry_aggr()->Spawn();
2486 }
2487 }
2488
2489
2490 static string GetErrorMsg() {
2491 if (g_boot_error)
2492 return *g_boot_error;
2493 return "";
2494 }
2495
2496
2497 /**
2498  * Called on its own at the end of SaveState; it performs half of a Fini(),
2499  * enough to delete the catalog manager, so that no open file handles
2500  * from file catalogs remain active.
2501 */
2502 static void ShutdownMountpoint() {
2503 delete cvmfs::talk_mgr_;
2504 cvmfs::talk_mgr_ = NULL;
2505
2506 delete cvmfs::notification_client_;
2507 cvmfs::notification_client_ = NULL;
2508
2509 // The remounter has a reference to the mount point and the inode generation
2510 delete cvmfs::fuse_remounter_;
2511 cvmfs::fuse_remounter_ = NULL;
2512
2513 // The unpin listener requires the catalog, so this must be unregistered
2514 // before the catalog manager is removed
2515 if (cvmfs::unpin_listener_ != NULL) {
2516 quota::UnregisterListener(cvmfs::unpin_listener_);
2517 cvmfs::unpin_listener_ = NULL;
2518 }
2519 if (cvmfs::watchdog_listener_ != NULL) {
2520 quota::UnregisterListener(cvmfs::watchdog_listener_);
2521 cvmfs::watchdog_listener_ = NULL;
2522 }
2523
2524 delete cvmfs::directory_handles_;
2525 delete cvmfs::mount_point_;
2526 cvmfs::directory_handles_ = NULL;
2527 cvmfs::mount_point_ = NULL;
2528 }
2529
2530
2531 static void Fini() {
2532 ShutdownMountpoint();
2533
2534 delete cvmfs::file_system_;
2535 delete cvmfs::options_mgr_;
2536 cvmfs::file_system_ = NULL;
2537 cvmfs::options_mgr_ = NULL;
2538
2539 delete cvmfs::watchdog_;
2540 cvmfs::watchdog_ = NULL;
2541
2542 delete g_boot_error;
2543 g_boot_error = NULL;
2544 auto_umount::SetMountpoint("");
2545
2546 crypto::CleanupLibcryptoMt();
2547 }
2548
2549
2550 static int AltProcessFlavor(int argc, char **argv) {
2551 if (strcmp(argv[1], "__cachemgr__") == 0) {
2552 return PosixQuotaManager::MainCacheManager(argc, argv);
2553 }
2554 if (strcmp(argv[1], "__wpad__") == 0) {
2555 return download::MainResolveProxyDescription(argc, argv);
2556 }
2557 return 1;
2558 }
2559
2560
2561 static bool MaintenanceMode(const int fd_progress) {
2562 SendMsg2Socket(fd_progress, "Entering maintenance mode\n");
2563 string msg_progress = "Draining out kernel caches (";
2564 if (FuseInvalidator::HasFuseNotifyInval())
2565 msg_progress += "up to ";
2566 msg_progress += StringifyInt(static_cast<int>(
2567 cvmfs::mount_point_->kcache_timeout_sec()))
2568 + "s)\n";
2569 SendMsg2Socket(fd_progress, msg_progress);
2570 cvmfs::fuse_remounter_->EnterMaintenanceMode();
2571 return true;
2572 }
2573
2574
2575 static bool SaveState(const int fd_progress, loader::StateList *saved_states) {
2576 string msg_progress;
2577
2578 const unsigned num_open_dirs = cvmfs::directory_handles_->size();
2579 if (num_open_dirs != 0) {
2580 #ifdef DEBUGMSG
2581 for (cvmfs::DirectoryHandles::iterator
2582 i = cvmfs::directory_handles_->begin(),
2583 iEnd = cvmfs::directory_handles_->end();
2584 i != iEnd;
2585 ++i) {
2586 LogCvmfs(kLogCvmfs, kLogDebug, "saving dirhandle %lu", i->first);
2587 }
2588 #endif
2589
2590 msg_progress = "Saving open directory handles ("
2591 + StringifyInt(num_open_dirs) + " handles)\n";
2592 SendMsg2Socket(fd_progress, msg_progress);
2593
2594 // TODO(jblomer): should rather be saved just in a malloc'd memory block
2595 cvmfs::DirectoryHandles *saved_handles = new cvmfs::DirectoryHandles(
2596 *cvmfs::directory_handles_);
2597 loader::SavedState *save_open_dirs = new loader::SavedState();
2598 save_open_dirs->state_id = loader::kStateOpenDirs;
2599 save_open_dirs->state = saved_handles;
2600 saved_states->push_back(save_open_dirs);
2601 }
2602
2603 if (!cvmfs::file_system_->IsNfsSource()) {
2604 msg_progress = "Saving inode tracker\n";
2605 SendMsg2Socket(fd_progress, msg_progress);
2606 glue::InodeTracker *saved_inode_tracker = new glue::InodeTracker(
2607 *cvmfs::mount_point_->inode_tracker());
2608 loader::SavedState *state_glue_buffer = new loader::SavedState();
2609 state_glue_buffer->state_id = loader::kStateGlueBufferV4;
2610 state_glue_buffer->state = saved_inode_tracker;
2611 saved_states->push_back(state_glue_buffer);
2612 }
2613
2614 msg_progress = "Saving negative entry cache\n";
2615 SendMsg2Socket(fd_progress, msg_progress);
2616 glue::DentryTracker *saved_dentry_tracker = new glue::DentryTracker(
2617 *cvmfs::mount_point_->dentry_tracker());
2618 loader::SavedState *state_dentry_tracker = new loader::SavedState();
2619 state_dentry_tracker->state_id = loader::kStateDentryTracker;
2620 state_dentry_tracker->state = saved_dentry_tracker;
2621 saved_states->push_back(state_dentry_tracker);
2622
2623 msg_progress = "Saving page cache entry tracker\n";
2624 SendMsg2Socket(fd_progress, msg_progress);
2625 glue::PageCacheTracker *saved_page_cache_tracker = new glue::PageCacheTracker(
2626 *cvmfs::mount_point_->page_cache_tracker());
2627 loader::SavedState *state_page_cache_tracker = new loader::SavedState();
2628 state_page_cache_tracker->state_id = loader::kStatePageCacheTracker;
2629 state_page_cache_tracker->state = saved_page_cache_tracker;
2630 saved_states->push_back(state_page_cache_tracker);
2631
2632 msg_progress = "Saving chunk tables\n";
2633 SendMsg2Socket(fd_progress, msg_progress);
2634 ChunkTables *saved_chunk_tables = new ChunkTables(
2635 *cvmfs::mount_point_->chunk_tables());
2636 loader::SavedState *state_chunk_tables = new loader::SavedState();
2637 state_chunk_tables->state_id = loader::kStateOpenChunksV4;
2638 state_chunk_tables->state = saved_chunk_tables;
2639 saved_states->push_back(state_chunk_tables);
2640
2641 msg_progress = "Saving inode generation\n";
2642 SendMsg2Socket(fd_progress, msg_progress);
2643 cvmfs::inode_generation_info_
2644 .inode_generation += cvmfs::mount_point_->catalog_mgr()->inode_gauge();
2645 cvmfs::InodeGenerationInfo
2646 *saved_inode_generation = new cvmfs::InodeGenerationInfo(
2647 cvmfs::inode_generation_info_);
2648 loader::SavedState *state_inode_generation = new loader::SavedState();
2649 state_inode_generation->state_id = loader::kStateInodeGeneration;
2650 state_inode_generation->state = saved_inode_generation;
2651 saved_states->push_back(state_inode_generation);
2652
2653 msg_progress = "Saving fuse state\n";
2654 SendMsg2Socket(fd_progress, msg_progress);
2655 cvmfs::FuseState *saved_fuse_state = new cvmfs::FuseState();
2656 saved_fuse_state->cache_symlinks = cvmfs::mount_point_->cache_symlinks();
2657 saved_fuse_state->has_dentry_expire = cvmfs::mount_point_
2658 ->fuse_expire_entry();
2659 loader::SavedState *state_fuse = new loader::SavedState();
2660 state_fuse->state_id = loader::kStateFuse;
2661 state_fuse->state = saved_fuse_state;
2662 saved_states->push_back(state_fuse);
2663
2664 // Close open file catalogs
2665 ShutdownMountpoint();
2666
2667 loader::SavedState *state_cache_mgr = new loader::SavedState();
2668 state_cache_mgr->state_id = loader::kStateOpenFiles;
2669 state_cache_mgr->state = cvmfs::file_system_->cache_mgr()->SaveState(
2670 fd_progress);
2671 saved_states->push_back(state_cache_mgr);
2672
2673 msg_progress = "Saving open files counter\n";
SendMsg2Socket(fd_progress, msg_progress);
2674 uint32_t *saved_num_fd = new uint32_t(
2675 cvmfs::file_system_->no_open_files()->Get());
2676 loader::SavedState *state_num_fd = new loader::SavedState();
2677 state_num_fd->state_id = loader::kStateOpenFilesCounter;
2678 state_num_fd->state = saved_num_fd;
2679 saved_states->push_back(state_num_fd);
2680
2681 return true;
2682 }
2683
2684
2685 static bool RestoreState(const int fd_progress,
2686 const loader::StateList &saved_states) {
2687 // If we have no saved version of the page cache tracker, it is unsafe
2688 // to start using it. The page cache tracker has to run for the entire
2689 // lifetime of the mountpoint or not at all.
2690 cvmfs::mount_point_->page_cache_tracker()->Disable();
2691
2692 for (unsigned i = 0, l = saved_states.size(); i < l; ++i) {
2693 if (saved_states[i]->state_id == loader::kStateOpenDirs) {
2694 SendMsg2Socket(fd_progress, "Restoring open directory handles... ");
2695 delete cvmfs::directory_handles_;
2696 cvmfs::DirectoryHandles
2697 *saved_handles = (cvmfs::DirectoryHandles *)saved_states[i]->state;
2698 cvmfs::directory_handles_ = new cvmfs::DirectoryHandles(*saved_handles);
2699 cvmfs::file_system_->no_open_dirs()->Set(
2700 cvmfs::directory_handles_->size());
2701 cvmfs::DirectoryHandles::const_iterator i = cvmfs::directory_handles_
2702 ->begin();
2703 for (; i != cvmfs::directory_handles_->end(); ++i) {
2704 if (i->first >= cvmfs::next_directory_handle_)
2705 cvmfs::next_directory_handle_ = i->first + 1;
2706 }
2707
2708 SendMsg2Socket(
2709 fd_progress,
2710 StringifyInt(cvmfs::directory_handles_->size()) + " handles\n");
2711 }
2712
2713 if (saved_states[i]->state_id == loader::kStateGlueBuffer) {
2714 SendMsg2Socket(fd_progress, "Migrating inode tracker (v1 to v4)... ");
2715 compat::inode_tracker::InodeTracker
2716 *saved_inode_tracker = (compat::inode_tracker::InodeTracker *)
2717 saved_states[i]
2718 ->state;
2719 compat::inode_tracker::Migrate(saved_inode_tracker,
2720 cvmfs::mount_point_->inode_tracker());
2721 SendMsg2Socket(fd_progress, " done\n");
2722 }
2723
2724 if (saved_states[i]->state_id == loader::kStateGlueBufferV2) {
2725 SendMsg2Socket(fd_progress, "Migrating inode tracker (v2 to v4)... ");
2726 compat::inode_tracker_v2::InodeTracker
2727 *saved_inode_tracker = (compat::inode_tracker_v2::InodeTracker *)
2728 saved_states[i]
2729 ->state;
2730 compat::inode_tracker_v2::Migrate(saved_inode_tracker,
2731 cvmfs::mount_point_->inode_tracker());
2732 SendMsg2Socket(fd_progress, " done\n");
2733 }
2734
2735 if (saved_states[i]->state_id == loader::kStateGlueBufferV3) {
2736 SendMsg2Socket(fd_progress, "Migrating inode tracker (v3 to v4)... ");
2737 compat::inode_tracker_v3::InodeTracker
2738 *saved_inode_tracker = (compat::inode_tracker_v3::InodeTracker *)
2739 saved_states[i]
2740 ->state;
2741 compat::inode_tracker_v3::Migrate(saved_inode_tracker,
2742 cvmfs::mount_point_->inode_tracker());
2743 SendMsg2Socket(fd_progress, " done\n");
2744 }
2745
2746 if (saved_states[i]->state_id == loader::kStateGlueBufferV4) {
2747 SendMsg2Socket(fd_progress, "Restoring inode tracker... ");
2748 cvmfs::mount_point_->inode_tracker()->~InodeTracker();
2749 glue::InodeTracker
2750 *saved_inode_tracker = (glue::InodeTracker *)saved_states[i]->state;
2751 new (cvmfs::mount_point_->inode_tracker())
2752 glue::InodeTracker(*saved_inode_tracker);
2753 SendMsg2Socket(fd_progress, " done\n");
2754 }
2755
2756 if (saved_states[i]->state_id == loader::kStateDentryTracker) {
2757 SendMsg2Socket(fd_progress, "Restoring dentry tracker... ");
2758 cvmfs::mount_point_->dentry_tracker()->~DentryTracker();
2759 glue::DentryTracker
2760 *saved_dentry_tracker = static_cast<glue::DentryTracker *>(
2761 saved_states[i]->state);
2762 new (cvmfs::mount_point_->dentry_tracker())
2763 glue::DentryTracker(*saved_dentry_tracker);
2764 SendMsg2Socket(fd_progress, " done\n");
2765 }
2766
2767 if (saved_states[i]->state_id == loader::kStatePageCacheTracker) {
2768 SendMsg2Socket(fd_progress, "Restoring page cache entry tracker... ");
2769 cvmfs::mount_point_->page_cache_tracker()->~PageCacheTracker();
2770 glue::PageCacheTracker
2771 *saved_page_cache_tracker = (glue::PageCacheTracker *)saved_states[i]
2772 ->state;
2773 new (cvmfs::mount_point_->page_cache_tracker())
2774 glue::PageCacheTracker(*saved_page_cache_tracker);
2775 SendMsg2Socket(fd_progress, " done\n");
2776 }
2777
2778 ChunkTables *chunk_tables = cvmfs::mount_point_->chunk_tables();
2779
2780 if (saved_states[i]->state_id == loader::kStateOpenChunks) {
2781 SendMsg2Socket(fd_progress, "Migrating chunk tables (v1 to v4)... ");
2782 compat::chunk_tables::ChunkTables
2783 *saved_chunk_tables = (compat::chunk_tables::ChunkTables *)
2784 saved_states[i]
2785 ->state;
2786 compat::chunk_tables::Migrate(saved_chunk_tables, chunk_tables);
2787 SendMsg2Socket(
2788 fd_progress,
2789 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2790 }
2791
2792 if (saved_states[i]->state_id == loader::kStateOpenChunksV2) {
2793 SendMsg2Socket(fd_progress, "Migrating chunk tables (v2 to v4)... ");
2794 compat::chunk_tables_v2::ChunkTables
2795 *saved_chunk_tables = (compat::chunk_tables_v2::ChunkTables *)
2796 saved_states[i]
2797 ->state;
2798 compat::chunk_tables_v2::Migrate(saved_chunk_tables, chunk_tables);
2799 SendMsg2Socket(
2800 fd_progress,
2801 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2802 }
2803
2804 if (saved_states[i]->state_id == loader::kStateOpenChunksV3) {
2805 SendMsg2Socket(fd_progress, "Migrating chunk tables (v3 to v4)... ");
2806 compat::chunk_tables_v3::ChunkTables
2807 *saved_chunk_tables = (compat::chunk_tables_v3::ChunkTables *)
2808 saved_states[i]
2809 ->state;
2810 compat::chunk_tables_v3::Migrate(saved_chunk_tables, chunk_tables);
2811 SendMsg2Socket(
2812 fd_progress,
2813 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2814 }
2815
2816 if (saved_states[i]->state_id == loader::kStateOpenChunksV4) {
2817 SendMsg2Socket(fd_progress, "Restoring chunk tables... ");
2818 chunk_tables->~ChunkTables();
2819 ChunkTables *saved_chunk_tables = reinterpret_cast<ChunkTables *>(
2820 saved_states[i]->state);
2821 new (chunk_tables) ChunkTables(*saved_chunk_tables);
2822 SendMsg2Socket(fd_progress, " done\n");
2823 }
2824
2825 if (saved_states[i]->state_id == loader::kStateInodeGeneration) {
2826 SendMsg2Socket(fd_progress, "Restoring inode generation... ");
2827 cvmfs::InodeGenerationInfo
2828 *old_info = (cvmfs::InodeGenerationInfo *)saved_states[i]->state;
2829 if (old_info->version == 1) {
2830 // Migration
2831 cvmfs::inode_generation_info_.initial_revision = old_info
2832 ->initial_revision;
2833 cvmfs::inode_generation_info_.incarnation = old_info->incarnation;
2834 // Note: in the rare case of the inode generation having been 0 before,
2835 // inodes can clash after a reload, before the next remount
2836 } else {
2837 cvmfs::inode_generation_info_ = *old_info;
2838 }
2839 ++cvmfs::inode_generation_info_.incarnation;
2840 SendMsg2Socket(fd_progress, " done\n");
2841 }
2842
2843 if (saved_states[i]->state_id == loader::kStateOpenFilesCounter) {
2844 SendMsg2Socket(fd_progress, "Restoring open files counter... ");
2845 cvmfs::file_system_->no_open_files()->Set(
2846 *(reinterpret_cast<uint32_t *>(saved_states[i]->state)));
2847 SendMsg2Socket(fd_progress, " done\n");
2848 }
2849
2850 if (saved_states[i]->state_id == loader::kStateOpenFiles) {
2851 const int old_root_fd = cvmfs::mount_point_->catalog_mgr()->root_fd();
2852
2853 // TODO(jblomer): make this less hacky
2854
2855 const CacheManagerIds saved_type = cvmfs::file_system_->cache_mgr()
2856 ->PeekState(
2857 saved_states[i]->state);
2858 int fixup_root_fd = -1;
2859
2860 if ((saved_type == kStreamingCacheManager)
2861 && (cvmfs::file_system_->cache_mgr()->id()
2862 != kStreamingCacheManager)) {
2863 // stick to the streaming cache manager
2864 StreamingCacheManager *new_cache_mgr = new StreamingCacheManager(
2865 cvmfs::max_open_files_,
2866 cvmfs::file_system_->cache_mgr(),
2867 cvmfs::mount_point_->download_mgr(),
2868 cvmfs::mount_point_->external_download_mgr(),
2869 StreamingCacheManager::kDefaultBufferSize,
2870 cvmfs::file_system_->statistics());
2871 fixup_root_fd = new_cache_mgr->PlantFd(old_root_fd);
2872 cvmfs::file_system_->ReplaceCacheManager(new_cache_mgr);
2873 cvmfs::mount_point_->fetcher()->ReplaceCacheManager(new_cache_mgr);
2874 cvmfs::mount_point_->external_fetcher()->ReplaceCacheManager(
2875 new_cache_mgr);
2876 }
2877
2878 if ((cvmfs::file_system_->cache_mgr()->id() == kStreamingCacheManager)
2879 && (saved_type != kStreamingCacheManager)) {
2880 // stick to the cache manager wrapped into the streaming cache
2881 CacheManager *wrapped_cache_mgr = dynamic_cast<StreamingCacheManager *>(
2882 cvmfs::file_system_->cache_mgr())
2883 ->MoveOutBackingCacheMgr(
2884 &fixup_root_fd);
2885 delete cvmfs::file_system_->cache_mgr();
2886 cvmfs::file_system_->ReplaceCacheManager(wrapped_cache_mgr);
2887 cvmfs::mount_point_->fetcher()->ReplaceCacheManager(wrapped_cache_mgr);
2888 cvmfs::mount_point_->external_fetcher()->ReplaceCacheManager(
2889 wrapped_cache_mgr);
2890 }
2891
2892 const int new_root_fd = cvmfs::file_system_->cache_mgr()->RestoreState(
2893 fd_progress, saved_states[i]->state);
2894 LogCvmfs(kLogCvmfs, kLogDebug, "new root file catalog descriptor @%d",
2895 new_root_fd);
2896 if (new_root_fd >= 0) {
2897 cvmfs::file_system_->RemapCatalogFd(old_root_fd, new_root_fd);
2898 } else if (fixup_root_fd >= 0) {
2899 LogCvmfs(kLogCvmfs, kLogDebug,
2900 "new root file catalog descriptor (fixup) @%d", fixup_root_fd);
2901 cvmfs::file_system_->RemapCatalogFd(old_root_fd, fixup_root_fd);
2902 }
2903 }
2904
2905 if (saved_states[i]->state_id == loader::kStateFuse) {
2906 SendMsg2Socket(fd_progress, "Restoring fuse state... ");
2907 cvmfs::FuseState *fuse_state = static_cast<cvmfs::FuseState *>(
2908 saved_states[i]->state);
2909 if (!fuse_state->cache_symlinks)
2910 cvmfs::mount_point_->DisableCacheSymlinks();
2911 if (fuse_state->has_dentry_expire)
2912 cvmfs::mount_point_->EnableFuseExpireEntry();
2913 SendMsg2Socket(fd_progress, " done\n");
2914 }
2915 }
2916 if (cvmfs::mount_point_->inode_annotation()) {
2917 const uint64_t saved_generation = cvmfs::inode_generation_info_
2918 .inode_generation;
2919 cvmfs::mount_point_->inode_annotation()->IncGeneration(saved_generation);
2920 }
2921
2922 return true;
2923 }
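// --- Editor's sketch of the hotpatch/reload cycle these hooks serve (inferred
// from the loader exports registered in LibraryMain() below, not a verbatim
// description of the loader): on a reload the loader roughly performs
//   MaintenanceMode(fd); SaveState(fd, &states);       // outgoing library
//   /* fuse module is swapped by the loader */
//   Init(exports); RestoreState(fd, states); Spawn();  // incoming library
//   FreeSavedState(fd, states);
// so every state_id written by SaveState() needs a matching branch in
// RestoreState() and FreeSavedState().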
2924
2925
2926 static void FreeSavedState(const int fd_progress,
2927 const loader::StateList &saved_states) {
2928 for (unsigned i = 0, l = saved_states.size(); i < l; ++i) {
2929 switch (saved_states[i]->state_id) {
2930 case loader::kStateOpenDirs:
2931 SendMsg2Socket(fd_progress, "Releasing saved open directory handles\n");
2932 delete static_cast<cvmfs::DirectoryHandles *>(saved_states[i]->state);
2933 break;
2934 case loader::kStateGlueBuffer:
2935 SendMsg2Socket(fd_progress,
2936 "Releasing saved glue buffer (version 1)\n");
2937 delete static_cast<compat::inode_tracker::InodeTracker *>(
2938 saved_states[i]->state);
2939 break;
2940 case loader::kStateGlueBufferV2:
2941 SendMsg2Socket(fd_progress,
2942 "Releasing saved glue buffer (version 2)\n");
2943 delete static_cast<compat::inode_tracker_v2::InodeTracker *>(
2944 saved_states[i]->state);
2945 break;
2946 case loader::kStateGlueBufferV3:
2947 SendMsg2Socket(fd_progress,
2948 "Releasing saved glue buffer (version 3)\n");
2949 delete static_cast<compat::inode_tracker_v3::InodeTracker *>(
2950 saved_states[i]->state);
2951 break;
2952 case loader::kStateGlueBufferV4:
2953 SendMsg2Socket(fd_progress, "Releasing saved glue buffer\n");
2954 delete static_cast<glue::InodeTracker *>(saved_states[i]->state);
2955 break;
2956 case loader::kStateDentryTracker:
2957 SendMsg2Socket(fd_progress, "Releasing saved dentry tracker\n");
2958 delete static_cast<glue::DentryTracker *>(saved_states[i]->state);
2959 break;
2960 case loader::kStatePageCacheTracker:
2961 SendMsg2Socket(fd_progress, "Releasing saved page cache entry tracker\n");
2962 delete static_cast<glue::PageCacheTracker *>(saved_states[i]->state);
2963 break;
2964 case loader::kStateOpenChunks:
2965 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 1)\n");
2966 delete static_cast<compat::chunk_tables::ChunkTables *>(
2967 saved_states[i]->state);
2968 break;
2969 case loader::kStateOpenChunksV2:
2970 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 2)\n");
2971 delete static_cast<compat::chunk_tables_v2::ChunkTables *>(
2972 saved_states[i]->state);
2973 break;
2974 case loader::kStateOpenChunksV3:
2975 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 3)\n");
2976 delete static_cast<compat::chunk_tables_v3::ChunkTables *>(
2977 saved_states[i]->state);
2978 break;
2979 case loader::kStateOpenChunksV4:
2980 SendMsg2Socket(fd_progress, "Releasing chunk tables\n");
2981 delete static_cast<ChunkTables *>(saved_states[i]->state);
2982 break;
2983 case loader::kStateInodeGeneration:
2984 SendMsg2Socket(fd_progress, "Releasing saved inode generation info\n");
2985 delete static_cast<cvmfs::InodeGenerationInfo *>(
2986 saved_states[i]->state);
2987 break;
2988 case loader::kStateOpenFiles:
2989 cvmfs::file_system_->cache_mgr()->FreeState(fd_progress,
2990 saved_states[i]->state);
2991 break;
2992 case loader::kStateOpenFilesCounter:
2993 SendMsg2Socket(fd_progress, "Releasing open files counter\n");
2994 delete static_cast<uint32_t *>(saved_states[i]->state);
2995 break;
2996 case loader::kStateFuse:
2997 SendMsg2Socket(fd_progress, "Releasing fuse state\n");
2998 delete static_cast<cvmfs::FuseState *>(saved_states[i]->state);
2999 break;
3000 default:
3001 break;
3002 }
3003 }
3004 }
3005
3006
3007 static void __attribute__((constructor)) LibraryMain() {
3008 g_cvmfs_exports = new loader::CvmfsExports();
3009 g_cvmfs_exports->so_version = CVMFS_VERSION;
3010 g_cvmfs_exports->fnAltProcessFlavor = AltProcessFlavor;
3011 g_cvmfs_exports->fnInit = Init;
3012 g_cvmfs_exports->fnSpawn = Spawn;
3013 g_cvmfs_exports->fnFini = Fini;
3014 g_cvmfs_exports->fnGetErrorMsg = GetErrorMsg;
3015 g_cvmfs_exports->fnMaintenanceMode = MaintenanceMode;
3016 g_cvmfs_exports->fnSaveState = SaveState;
3017 g_cvmfs_exports->fnRestoreState = RestoreState;
3018 g_cvmfs_exports->fnFreeSavedState = FreeSavedState;
3019 cvmfs::SetCvmfsOperations(&g_cvmfs_exports->cvmfs_operations);
3020 }
3021
3022
3023 static void __attribute__((destructor)) LibraryExit() {
3024 delete g_cvmfs_exports;
3025 g_cvmfs_exports = NULL;
3026 }
3027