GCC Code Coverage Report

Directory: cvmfs/
File:      cvmfs/cvmfs.cc
Date:      2024-04-21 02:33:16

            Exec   Total   Coverage
Lines:         0    1572       0.0%
Branches:      0    2278       0.0%

Line  Branch  Exec  Source
1 /**
2 * This file is part of the CernVM File System.
3 *
4 * CernVM-FS is a FUSE module which implements an HTTP read-only filesystem.
5 * The original idea is based on GROW-FS.
6 *
7 * CernVM-FS shows a remote HTTP directory as a local file system. The client
8 * sees all available files. On first access, a file is downloaded and
9 * cached locally. All downloaded pieces are verified by a cryptographic
10 * content hash.
11 *
12 * To do so, a directory hive has to be transformed into a CVMFS2
13 * "repository". This can be done by the CernVM-FS server tools.
14 *
15 * This preparation of directories is transparent to web servers and
16 * web proxies. They just serve static content, i.e. arbitrary files.
17 * Any HTTP server should do the job. We use Apache + Squid. Serving
18 * files from the memory of a web proxy brings a significant performance
19 * improvement.
20 */
21
22 // TODO(jblomer): the file system root should probably always return 1 for an
23 // inode. See also integration test #23.
24
25 #define ENOATTR ENODATA /**< instead of including attr/xattr.h */
26
27 #ifndef __STDC_FORMAT_MACROS
28 #define __STDC_FORMAT_MACROS
29 #endif
30
31 // sys/xattr.h conflicts with linux/xattr.h and needs to be loaded very early
32 #include <sys/xattr.h> // NOLINT
33
34 #include "cvmfs_config.h"
35 #include "cvmfs.h"
36
37 #include <dirent.h>
38 #include <errno.h>
39 #include <fcntl.h>
40 #include <google/dense_hash_map>
41 #include <inttypes.h>
42 #include <pthread.h>
43 #include <stddef.h>
44 #include <stdint.h>
45 #include <sys/errno.h>
46 #include <sys/file.h>
47 #include <sys/mount.h>
48 #include <sys/resource.h>
49 #include <sys/stat.h>
50 #include <sys/time.h>
51 #include <sys/types.h>
52 #include <sys/wait.h>
53 #include <unistd.h>
54
55 #include <algorithm>
56 #include <cassert>
57 #include <cstdio>
58 #include <cstdlib>
59 #include <cstring>
60 #include <ctime>
61 #include <functional>
62 #include <map>
63 #include <new>
64 #include <string>
65 #include <utility>
66 #include <vector>
67
68 #include "authz/authz_session.h"
69 #include "auto_umount.h"
70 #include "backoff.h"
71 #include "cache.h"
72 #include "cache_posix.h"
73 #include "cache_stream.h"
74 #include "catalog_mgr_client.h"
75 #include "clientctx.h"
76 #include "compat.h"
77 #include "compression.h"
78 #include "crypto/crypto_util.h"
79 #include "crypto/hash.h"
80 #include "crypto/signature.h"
81 #include "directory_entry.h"
82 #include "duplex_fuse.h"
83 #include "fence.h"
84 #include "fetch.h"
85 #include "file_chunk.h"
86 #include "fuse_inode_gen.h"
87 #include "fuse_remount.h"
88 #include "globals.h"
89 #include "glue_buffer.h"
90 #include "history_sqlite.h"
91 #include "interrupt.h"
92 #include "loader.h"
93 #include "lru_md.h"
94 #include "magic_xattr.h"
95 #include "manifest_fetch.h"
96 #include "monitor.h"
97 #include "mountpoint.h"
98 #include "network/download.h"
99 #include "nfs_maps.h"
100 #include "notification_client.h"
101 #include "options.h"
102 #include "quota_listener.h"
103 #include "quota_posix.h"
104 #include "sanitizer.h"
105 #include "shortstring.h"
106 #include "sqlitemem.h"
107 #include "sqlitevfs.h"
108 #include "statistics.h"
109 #include "talk.h"
110 #include "telemetry_aggregator.h"
111 #include "tracer.h"
112 #include "util/algorithm.h"
113 #include "util/atomic.h"
114 #include "util/concurrency.h"
115 #include "util/exception.h"
116 #include "util/logging.h"
117 #include "util/platform.h"
118 #include "util/smalloc.h"
119 #include "util/uuid.h"
120 #include "wpad.h"
121 #include "xattr.h"
122
123 using namespace std; // NOLINT
124
125 namespace cvmfs {
126
127 FileSystem *file_system_ = NULL;
128 MountPoint *mount_point_ = NULL;
129 TalkManager *talk_mgr_ = NULL;
130 NotificationClient *notification_client_ = NULL;
131 Watchdog *watchdog_ = NULL;
132 FuseRemounter *fuse_remounter_ = NULL;
133 InodeGenerationInfo inode_generation_info_;
134
135
136 /**
137 * For cvmfs_opendir / cvmfs_readdir
138 * TODO: use mmap for very large listings
139 */
140 struct DirectoryListing {
141 char *buffer; /**< Filled by fuse_add_direntry */
142
143 // Not really used anymore. But directory listing needs to be migrated during
144 // hotpatch. If buffer is allocated by smmap, capacity is zero.
145 size_t size;
146 size_t capacity;
147
148 DirectoryListing() : buffer(NULL), size(0), capacity(0) { }
149 };
150
151 const loader::LoaderExports *loader_exports_ = NULL;
152 OptionsManager *options_mgr_ = NULL;
153 pid_t pid_ = 0; /**< will be set after daemon() */
154 quota::ListenerHandle *watchdog_listener_ = NULL;
155 quota::ListenerHandle *unpin_listener_ = NULL;
156
157
158 typedef google::dense_hash_map<uint64_t, DirectoryListing,
159 hash_murmur<uint64_t> >
160 DirectoryHandles;
161 DirectoryHandles *directory_handles_ = NULL;
162 pthread_mutex_t lock_directory_handles_ = PTHREAD_MUTEX_INITIALIZER;
163 uint64_t next_directory_handle_ = 0;
164
165 unsigned max_open_files_; /**< maximum allowed number of open files */
166 /**
167 * The refcounted cache manager should suppress checking the current number
168 * of files opened through cvmfs_open() against the process' file descriptor
169 * limit.
170 */
171 bool check_fd_overflow_ = true;
172 /**
173 * Number of reserved file descriptors for internal use
174 */
175 const int kNumReservedFd = 512;
176 /**
177 * Warn if the process has a lower limit for the number of open file descriptors
178 */
179 const unsigned int kMinOpenFiles = 8192;
180
181
182 class FuseInterruptCue : public InterruptCue {
183 public:
184 explicit FuseInterruptCue(fuse_req_t *r) : req_ptr_(r) { }
185 virtual ~FuseInterruptCue() { }
186 virtual bool IsCanceled() { return fuse_req_interrupted(*req_ptr_); }
187 private:
188 fuse_req_t *req_ptr_;
189 };
190
191 /**
192 * Options related to the fuse kernel connection. The capabilities are
193 * determined only once at mount time. If a capability triggers certain
194 * behavior of the cvmfs fuse module, it needs to be re-triggered on reload.
195 * Used in SaveState and RestoreState to store the details of symlink caching.
196 */
197 struct FuseState {
198 FuseState() : version(0), cache_symlinks(false), has_dentry_expire(false) {}
199 unsigned version;
200 bool cache_symlinks;
201 bool has_dentry_expire;
202 };
203
204
205 /**
206 * Atomic increase of the open files counter. If we use a non-refcounted
207 * POSIX cache manager, check for open fd overflow. Return false if too many
208 * files are opened. Otherwise return true (success).
209 */
210 static inline bool IncAndCheckNoOpenFiles() {
211 const int64_t no_open_files = perf::Xadd(file_system_->no_open_files(), 1);
212 if (!check_fd_overflow_)
213 return true;
214 return no_open_files < (static_cast<int>(max_open_files_) - kNumReservedFd);
215 }
216
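IncAndCheckNoOpenFiles() above follows an increment-then-compare pattern: the counter of open files is bumped atomically and the open is refused if the result would eat into the reserved budget of file descriptors. Below is a minimal, self-contained sketch of the same idea; it is not part of cvmfs.cc, and the limit values and names (kMaxOpenFiles, IncAndCheck) are placeholders, not cvmfs symbols.

#include <atomic>
#include <cstdint>
#include <cstdio>

namespace {
std::atomic<int64_t> g_no_open_files(0);
const int64_t kMaxOpenFiles = 65536;  // assumed RLIMIT_NOFILE soft limit
const int64_t kNumReservedFd = 512;   // kept back for internal use

// Increment first, then check against the limit minus the reserved budget.
bool IncAndCheck() {
  const int64_t now_open = g_no_open_files.fetch_add(1) + 1;
  return now_open < (kMaxOpenFiles - kNumReservedFd);
}
}  // anonymous namespace

int main() {
  if (!IncAndCheck()) {
    std::printf("open file descriptor limit exceeded\n");
    g_no_open_files.fetch_sub(1);  // caller rolls back, as cvmfs_open() does
    return 1;
  }
  std::printf("open permitted, %lld file(s) now open\n",
              static_cast<long long>(g_no_open_files.load()));
  return 0;
}
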
217 static inline double GetKcacheTimeout() {
218 if (!fuse_remounter_->IsCaching())
219 return 0.0;
220 return mount_point_->kcache_timeout_sec();
221 }
222
223
224 void GetReloadStatus(bool *drainout_mode, bool *maintenance_mode) {
225 *drainout_mode = fuse_remounter_->IsInDrainoutMode();
226 *maintenance_mode = fuse_remounter_->IsInMaintenanceMode();
227 }
228
229
230 static bool UseWatchdog() {
231 if (loader_exports_ == NULL || loader_exports_->version < 2) {
232 return true; // spawn watchdog by default
233 // Note: with library versions before 2.1.8 it might not
234 // create stack traces properly in all cases
235 }
236
237 return !loader_exports_->disable_watchdog;
238 }
239
240 std::string PrintInodeGeneration() {
241 return "init-catalog-revision: " +
242 StringifyInt(inode_generation_info_.initial_revision) + " " +
243 "current-catalog-revision: " +
244 StringifyInt(mount_point_->catalog_mgr()->GetRevision()) + " " +
245 "incarnation: " + StringifyInt(inode_generation_info_.incarnation) + " " +
246 "inode generation: " + StringifyInt(inode_generation_info_.inode_generation)
247 + "\n";
248 }
249
250
251 static bool CheckVoms(const fuse_ctx &fctx) {
252 if (!mount_point_->has_membership_req())
253 return true;
254 string mreq = mount_point_->membership_req();
255 LogCvmfs(kLogCvmfs, kLogDebug, "Got VOMS authz %s from filesystem "
256 "properties", mreq.c_str());
257
258 if (fctx.uid == 0)
259 return true;
260
261 return mount_point_->authz_session_mgr()->IsMemberOf(fctx.pid, mreq);
262 }
263
264 static bool MayBeInPageCacheTracker(const catalog::DirectoryEntry &dirent) {
265 return dirent.IsRegular() &&
266 (dirent.inode() < mount_point_->catalog_mgr()->GetRootInode());
267 }
268
269 static bool HasDifferentContent(
270 const catalog::DirectoryEntry &dirent,
271 const shash::Any &hash,
272 const struct stat &info)
273 {
274 if (hash == dirent.checksum())
275 return false;
276 // For chunked files, we don't want to load the full list of chunk hashes
277 // so we only check the last modified timestamp
278 if (dirent.IsChunkedFile() && (info.st_mtime == dirent.mtime()))
279 return false;
280 return true;
281 }
282
283 /**
284 * When we lookup an inode (cvmfs_lookup(), cvmfs_opendir()), we usually provide
285 * the live inode, i.e. the one in the inode tracker. However, if the inode
286 * refers to an open file that has different content than the one from the
287 * current catalogs, we will replace the live inode in the tracker by the one
288 * from the current generation.
289 *
290 * To still access the old inode, e.g. for fstat() on the open file, the stat
291 * structure connected to this inode is taken from the page cache tracker.
292 */
293 static bool FixupOpenInode(const PathString &path,
294 catalog::DirectoryEntry *dirent)
295 {
296 if (!MayBeInPageCacheTracker(*dirent))
297 return false;
298
299 shash::Any hash_open;
300 struct stat info;
301 bool is_open = mount_point_->page_cache_tracker()->GetInfoIfOpen(
302 dirent->inode(), &hash_open, &info);
303 if (!is_open)
304 return false;
305 if (!HasDifferentContent(*dirent, hash_open, info))
306 return false;
307
308 // Overwrite dirent with inode from current generation
309 bool found = mount_point_->catalog_mgr()->LookupPath(
310 path, catalog::kLookupDefault, dirent);
311 assert(found);
312
313 return true;
314 }
315
316 static bool GetDirentForInode(const fuse_ino_t ino,
317 catalog::DirectoryEntry *dirent)
318 {
319 // Lookup inode in cache
320 if (mount_point_->inode_cache()->Lookup(ino, dirent))
321 return true;
322
323 // Look in the catalogs in 2 steps: lookup inode->path, lookup path
324 static catalog::DirectoryEntry dirent_negative =
325 catalog::DirectoryEntry(catalog::kDirentNegative);
326 // Reset directory entry. If the function returns false and dirent is not
327 // kDirentNegative, it was an I/O error
328 *dirent = catalog::DirectoryEntry();
329
330 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
331
332 if (file_system_->IsNfsSource()) {
333 // NFS mode
334 PathString path;
335 bool retval = file_system_->nfs_maps()->GetPath(ino, &path);
336 if (!retval) {
337 *dirent = dirent_negative;
338 return false;
339 }
340 if (catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent)) {
341 // Fix inodes
342 dirent->set_inode(ino);
343 mount_point_->inode_cache()->Insert(ino, *dirent);
344 return true;
345 }
346 return false; // Not found in catalog or catalog load error
347 }
348
349 // Non-NFS mode
350 PathString path;
351 if (ino == catalog_mgr->GetRootInode()) {
352 bool retval =
353 catalog_mgr->LookupPath(PathString(), catalog::kLookupDefault, dirent);
354
355 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
356 "GetDirentForInode: Race condition? Not found dirent %s",
357 dirent->name().c_str())) {
358 return false;
359 }
360
361 dirent->set_inode(ino);
362 mount_point_->inode_cache()->Insert(ino, *dirent);
363 return true;
364 }
365
366 glue::InodeEx inode_ex(ino, glue::InodeEx::kUnknownType);
367 bool retval = mount_point_->inode_tracker()->FindPath(&inode_ex, &path);
368 if (!retval) {
369 // This may be a retired inode whose stat information is only available
370 // in the page cache tracker because there is still an open file
371 LogCvmfs(kLogCvmfs, kLogDebug,
372 "GetDirentForInode inode lookup failure %" PRId64, ino);
373 *dirent = dirent_negative;
374 // Indicate that the inode was not found in the tracker rather than not
375 // found in the catalog
376 dirent->set_inode(ino);
377 return false;
378 }
379 if (catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent)) {
380 if (!inode_ex.IsCompatibleFileType(dirent->mode())) {
381 LogCvmfs(kLogCvmfs, kLogDebug,
382 "Warning: inode %" PRId64 " (%s) changed file type",
383 ino, path.c_str());
384 // TODO(jblomer): we detect this issue but let it continue unhandled.
385 // Fix me.
386 }
387
388 // Fix inodes
389 dirent->set_inode(ino);
390 mount_point_->inode_cache()->Insert(ino, *dirent);
391 return true;
392 }
393
394 // Can happen after reload of catalogs or on catalog load failure
395 LogCvmfs(kLogCvmfs, kLogDebug, "GetDirentForInode path lookup failure");
396 return false;
397 }
398
399
400 /**
401 * Returns 0 if the path does not exist,
402 * 1 if the live inode is returned,
403 * >1 the (now stale) live inode; in that case the inode in dirent
404 * comes from the catalog of the current generation
405 * (see FixupOpenInode)
406 */
407 static uint64_t GetDirentForPath(const PathString &path,
408 catalog::DirectoryEntry *dirent)
409 {
410 uint64_t live_inode = 0;
411 if (!file_system_->IsNfsSource())
412 live_inode = mount_point_->inode_tracker()->FindInode(path);
413
414 shash::Md5 md5path(path.GetChars(), path.GetLength());
415 if (mount_point_->md5path_cache()->Lookup(md5path, dirent)) {
416 if (dirent->GetSpecial() == catalog::kDirentNegative)
417 return 0;
418 // We may have initially stored the entry with an old inode in the
419 // md5path cache and now should update it with the new one.
420 if (!file_system_->IsNfsSource() && (live_inode != 0))
421 dirent->set_inode(live_inode);
422 return 1;
423 }
424
425 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
426
427 // Lookup entry in the catalog. TODO: avoid computing the md5 hash twice
428 bool retval;
429 retval = catalog_mgr->LookupPath(path, catalog::kLookupDefault, dirent);
430 if (retval) {
431 if (file_system_->IsNfsSource()) {
432 dirent->set_inode(file_system_->nfs_maps()->GetInode(path));
433 } else if (live_inode != 0) {
434 dirent->set_inode(live_inode);
435 if (FixupOpenInode(path, dirent)) {
436 LogCvmfs(kLogCvmfs, kLogDebug,
437 "content of %s change, replacing inode %" PRIu64 " --> %" PRIu64,
438 path.c_str(), live_inode, dirent->inode());
439 return live_inode;
440 // Do not populate the md5path cache until the inode tracker is fixed
441 }
442 }
443 mount_point_->md5path_cache()->Insert(md5path, *dirent);
444 return 1;
445 }
446
447 LogCvmfs(kLogCvmfs, kLogDebug, "GetDirentForPath, no entry");
448 // Only insert ENOENT results into negative cache. Otherwise it was an
449 // error loading nested catalogs
450 if (dirent->GetSpecial() == catalog::kDirentNegative)
451 mount_point_->md5path_cache()->InsertNegative(md5path);
452 return 0;
453 }
454
455
456 static bool GetPathForInode(const fuse_ino_t ino, PathString *path) {
457 // Check the path cache first
458 if (mount_point_->path_cache()->Lookup(ino, path))
459 return true;
460
461 if (file_system_->IsNfsSource()) {
462 // NFS mode, just a lookup
463 LogCvmfs(kLogCvmfs, kLogDebug, "MISS %lu - lookup in NFS maps", ino);
464 if (file_system_->nfs_maps()->GetPath(ino, path)) {
465 mount_point_->path_cache()->Insert(ino, *path);
466 return true;
467 }
468 return false;
469 }
470
471 if (ino == mount_point_->catalog_mgr()->GetRootInode())
472 return true;
473
474 LogCvmfs(kLogCvmfs, kLogDebug, "MISS %lu - looking in inode tracker", ino);
475 glue::InodeEx inode_ex(ino, glue::InodeEx::kUnknownType);
476 bool retval = mount_point_->inode_tracker()->FindPath(&inode_ex, path);
477
478 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
479 "GetPathForInode: Race condition? "
480 "Inode not found in inode tracker at path %s",
481 path->c_str())) {
482 return false;
483 }
484
485
486 mount_point_->path_cache()->Insert(ino, *path);
487 return true;
488 }
489
490 static void DoTraceInode(const int event,
491 fuse_ino_t ino,
492 const std::string &msg)
493 {
494 PathString path;
495 bool found = GetPathForInode(ino, &path);
496 if (!found) {
497 LogCvmfs(kLogCvmfs, kLogDebug,
498 "Tracing: Could not find path for inode %" PRIu64, uint64_t(ino));
499 mount_point_->tracer()->Trace(event, PathString("@UNKNOWN"), msg);
500 } else {
501 mount_point_->tracer()->Trace(event, path, msg);
502 }
503 }
504
505 static void inline TraceInode(const int event,
506 fuse_ino_t ino,
507 const std::string &msg)
508 {
509 if (mount_point_->tracer()->IsActive()) DoTraceInode(event, ino, msg);
510 }
511
512 /**
513 * Find the inode number of a file name in a directory given by inode.
514 * This or getattr is called as a kind of prerequisite to every operation.
515 * We do check catalog TTL here (and reload, if necessary).
516 */
517 static void cvmfs_lookup(fuse_req_t req, fuse_ino_t parent, const char *name) {
518 HighPrecisionTimer guard_timer(file_system_->hist_fs_lookup());
519
520 perf::Inc(file_system_->n_fs_lookup());
521 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
522 FuseInterruptCue ic(&req);
523 ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid, &ic);
524 fuse_remounter_->TryFinish();
525
526 fuse_remounter_->fence()->Enter();
527 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
528
529 fuse_ino_t parent_fuse = parent;
530 parent = catalog_mgr->MangleInode(parent);
531 LogCvmfs(kLogCvmfs, kLogDebug,
532 "cvmfs_lookup in parent inode: %" PRIu64 " for name: %s",
533 uint64_t(parent), name);
534
535 PathString path;
536 PathString parent_path;
537 uint64_t live_inode = 0;
538 catalog::DirectoryEntry dirent;
539 struct fuse_entry_param result;
540
541 memset(&result, 0, sizeof(result));
542 double timeout = GetKcacheTimeout();
543 result.attr_timeout = timeout;
544 result.entry_timeout = timeout;
545
546 // Special NFS lookups: . and ..
547 if ((strcmp(name, ".") == 0) || (strcmp(name, "..") == 0)) {
548 if (GetDirentForInode(parent, &dirent)) {
549 if (strcmp(name, ".") == 0) {
550 goto lookup_reply_positive;
551 } else {
552 // Lookup for ".."
553 if (dirent.inode() == catalog_mgr->GetRootInode()) {
554 dirent.set_inode(1);
555 goto lookup_reply_positive;
556 }
557 if (!GetPathForInode(parent, &parent_path))
558 goto lookup_reply_negative;
559 if (GetDirentForPath(GetParentPath(parent_path), &dirent) > 0)
560 goto lookup_reply_positive;
561 }
562 }
563 // No entry for "." or no entry for ".."
564 if (dirent.GetSpecial() == catalog::kDirentNegative)
565 goto lookup_reply_negative;
566 else
567 goto lookup_reply_error;
568 assert(false);
569 }
570
571 if (!GetPathForInode(parent, &parent_path)) {
572 LogCvmfs(kLogCvmfs, kLogDebug, "no path for parent inode found");
573 goto lookup_reply_negative;
574 }
575
576 path.Assign(parent_path);
577 path.Append("/", 1);
578 path.Append(name, strlen(name));
579 mount_point_->tracer()->Trace(Tracer::kEventLookup, path, "lookup()");
580 live_inode = GetDirentForPath(path, &dirent);
581 if (live_inode == 0) {
582 if (dirent.GetSpecial() == catalog::kDirentNegative)
583 goto lookup_reply_negative;
584 else
585 goto lookup_reply_error;
586 }
587
588 lookup_reply_positive:
589 if (!file_system_->IsNfsSource()) {
590 if (live_inode > 1) {
591 // live inode is stale (open file), we replace it
592 assert(dirent.IsRegular());
593 assert(dirent.inode() != live_inode);
594 // The new inode is put in the tracker with refcounter == 0
595 bool replaced = mount_point_->inode_tracker()->ReplaceInode(
596 live_inode, glue::InodeEx(dirent.inode(), dirent.mode()));
597 if (replaced)
598 perf::Inc(file_system_->n_fs_inode_replace());
599 }
600 mount_point_->inode_tracker()->VfsGet(
601 glue::InodeEx(dirent.inode(), dirent.mode()), path);
602 }
603 // We do _not_ track (and evict) positive replies; among other things, test
604 // 076 fails with the following line uncommented
605 //
606 // WARNING! ENABLING THIS BREAKS ANY TYPE OF MOUNTPOINT POINTING TO THIS INODE
607 //
608 // only safe if fuse_expire_entry is available
609 if (mount_point_->fuse_expire_entry()
610 || (mount_point_->cache_symlinks() && dirent.IsLink())) {
611 LogCvmfs(kLogCache, kLogDebug, "Dentry to evict: %s", name);
612 mount_point_->dentry_tracker()->Add(parent_fuse, name,
613 static_cast<uint64_t>(timeout));
614 }
615
616 fuse_remounter_->fence()->Leave();
617 result.ino = dirent.inode();
618 result.attr = dirent.GetStatStructure();
619 fuse_reply_entry(req, &result);
620 return;
621
622 lookup_reply_negative:
623 // Will be a no-op if there is no fuse cache eviction
624 mount_point_->dentry_tracker()->Add(parent_fuse, name, uint64_t(timeout));
625 fuse_remounter_->fence()->Leave();
626 perf::Inc(file_system_->n_fs_lookup_negative());
627 result.ino = 0;
628 fuse_reply_entry(req, &result);
629 return;
630
631 lookup_reply_error:
632 fuse_remounter_->fence()->Leave();
633
634 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr, "EIO (01) on %s", name);
635 perf::Inc(file_system_->n_eio_total());
636 perf::Inc(file_system_->n_eio_01());
637
638 fuse_reply_err(req, EIO);
639 }
640
641
642 /**
643 * Decreases an inode's reference counter in the inode tracker by nlookup.
644 */
645 static void cvmfs_forget(
646 fuse_req_t req,
647 fuse_ino_t ino,
648 #if CVMFS_USE_LIBFUSE == 2
649 unsigned long nlookup // NOLINT
650 #else
651 uint64_t nlookup
652 #endif
653 ) {
654 HighPrecisionTimer guard_timer(file_system_->hist_fs_forget());
655
656 perf::Inc(file_system_->n_fs_forget());
657
658 // The libfuse high-level library does the same
659 if (ino == FUSE_ROOT_ID) {
660 fuse_reply_none(req);
661 return;
662 }
663
664 fuse_remounter_->fence()->Enter();
665 ino = mount_point_->catalog_mgr()->MangleInode(ino);
666 // This has been seen to deadlock on the debug log mutex on SL5. Problem of
667 // old kernel/fuse?
668 #if CVMFS_USE_LIBFUSE == 2
669 LogCvmfs(kLogCvmfs, kLogDebug, "forget on inode %" PRIu64 " by %u",
670 uint64_t(ino), nlookup);
671 #else
672 LogCvmfs(kLogCvmfs, kLogDebug, "forget on inode %" PRIu64 " by %" PRIu64,
673 uint64_t(ino), nlookup);
674 #endif
675 if (!file_system_->IsNfsSource()) {
676 bool removed =
677 mount_point_->inode_tracker()->GetVfsPutRaii().VfsPut(ino, nlookup);
678 if (removed)
679 mount_point_->page_cache_tracker()->GetEvictRaii().Evict(ino);
680 }
681 fuse_remounter_->fence()->Leave();
682 fuse_reply_none(req);
683 }
684
685
686 #if (FUSE_VERSION >= 29)
687 static void cvmfs_forget_multi(
688 fuse_req_t req,
689 size_t count,
690 struct fuse_forget_data *forgets
691 ) {
692 HighPrecisionTimer guard_timer(file_system_->hist_fs_forget_multi());
693
694 perf::Xadd(file_system_->n_fs_forget(), count);
695 if (file_system_->IsNfsSource()) {
696 fuse_reply_none(req);
697 return;
698 }
699
700 fuse_remounter_->fence()->Enter();
701 {
702 glue::InodeTracker::VfsPutRaii vfs_put_raii =
703 mount_point_->inode_tracker()->GetVfsPutRaii();
704 glue::PageCacheTracker::EvictRaii evict_raii =
705 mount_point_->page_cache_tracker()->GetEvictRaii();
706 for (size_t i = 0; i < count; ++i) {
707 if (forgets[i].ino == FUSE_ROOT_ID) {
708 continue;
709 }
710
711 uint64_t ino = mount_point_->catalog_mgr()->MangleInode(forgets[i].ino);
712 LogCvmfs(kLogCvmfs, kLogDebug, "forget on inode %" PRIu64 " by %" PRIu64,
713 ino, forgets[i].nlookup);
714
715 bool removed = vfs_put_raii.VfsPut(ino, forgets[i].nlookup);
716 if (removed)
717 evict_raii.Evict(ino);
718 }
719 }
720 fuse_remounter_->fence()->Leave();
721
722 fuse_reply_none(req);
723 }
724 #endif // FUSE_VERSION >= 29
725
726
727 /**
728 * Looks into dirent to decide if this is an EIO negative reply or an
729 * ENOENT negative reply. We do not need to store the reply in the negative
730 * cache tracker because ReplyNegative is called on inode queries. Inodes,
731 * however, change anyway when a new catalog is applied.
732 */
733 static void ReplyNegative(const catalog::DirectoryEntry &dirent,
734 fuse_req_t req)
735 {
736 if (dirent.GetSpecial() == catalog::kDirentNegative) {
737 fuse_reply_err(req, ENOENT);
738 } else {
739 const char * name = dirent.name().c_str();
740 const char * link = dirent.symlink().c_str();
741
742 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
743 "EIO (02) name=%s symlink=%s",
744 name ? name: "<unset>",
745 link ? link: "<unset>");
746
747 perf::Inc(file_system_->n_eio_total());
748 perf::Inc(file_system_->n_eio_02());
749 fuse_reply_err(req, EIO);
750 }
751 }
752
753
754 /**
755 * Transform a cvmfs dirent into a struct stat.
756 */
757 static void cvmfs_getattr(fuse_req_t req, fuse_ino_t ino,
758 struct fuse_file_info *fi)
759 {
760 HighPrecisionTimer guard_timer(file_system_->hist_fs_getattr());
761
762 perf::Inc(file_system_->n_fs_stat());
763 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
764 FuseInterruptCue ic(&req);
765 ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid, &ic);
766 fuse_remounter_->TryFinish();
767
768 fuse_remounter_->fence()->Enter();
769 ino = mount_point_->catalog_mgr()->MangleInode(ino);
770 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_getattr (stat) for inode: %" PRIu64,
771 uint64_t(ino));
772
773 if (!CheckVoms(*fuse_ctx)) {
774 fuse_remounter_->fence()->Leave();
775 fuse_reply_err(req, EACCES);
776 return;
777 }
778 catalog::DirectoryEntry dirent;
779 bool found = GetDirentForInode(ino, &dirent);
780 TraceInode(Tracer::kEventGetAttr, ino, "getattr()");
781 if ((!found && (dirent.inode() == ino)) || MayBeInPageCacheTracker(dirent)) {
782 // Serve retired inode from page cache tracker; even if we find it in the
783 // catalog, we replace the dirent by the page cache tracker version to
784 // not confuse open file handles
785 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_getattr %" PRIu64 " "
786 "served from page cache tracker", ino);
787 shash::Any hash;
788 struct stat info;
789 bool is_open =
790 mount_point_->page_cache_tracker()->GetInfoIfOpen(ino, &hash, &info);
791 if (is_open) {
792 fuse_remounter_->fence()->Leave();
793 if (found && HasDifferentContent(dirent, hash, info)) {
794 // We should from now on provide the new inode information instead
795 // of the stale one. To this end, we need to invalidate the dentry to
796 // trigger a fresh LOOKUP call
797 uint64_t parent_ino;
798 NameString name;
799 if (mount_point_->inode_tracker()->FindDentry(
800 dirent.inode(), &parent_ino, &name))
801 {
802 fuse_remounter_->InvalidateDentry(parent_ino, name);
803 }
804 perf::Inc(file_system_->n_fs_stat_stale());
805 }
806 fuse_reply_attr(req, &info, GetKcacheTimeout());
807 return;
808 }
809 }
810 fuse_remounter_->fence()->Leave();
811
812 if (!found) {
813 ReplyNegative(dirent, req);
814 return;
815 }
816
817 struct stat info = dirent.GetStatStructure();
818
819 fuse_reply_attr(req, &info, GetKcacheTimeout());
820 }
821
822
823 /**
824 * Reads a symlink from the catalog. Environment variables are expanded.
825 */
826 static void cvmfs_readlink(fuse_req_t req, fuse_ino_t ino) {
827 HighPrecisionTimer guard_timer(file_system_->hist_fs_readlink());
828
829 perf::Inc(file_system_->n_fs_readlink());
830 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
831 FuseInterruptCue ic(&req);
832 ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid, &ic);
833
834 fuse_remounter_->fence()->Enter();
835 ino = mount_point_->catalog_mgr()->MangleInode(ino);
836 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_readlink on inode: %" PRIu64,
837 uint64_t(ino));
838
839 catalog::DirectoryEntry dirent;
840 const bool found = GetDirentForInode(ino, &dirent);
841 TraceInode(Tracer::kEventReadlink, ino, "readlink()");
842 fuse_remounter_->fence()->Leave();
843
844 if (!found) {
845 ReplyNegative(dirent, req);
846 return;
847 }
848
849 if (!dirent.IsLink()) {
850 fuse_reply_err(req, EINVAL);
851 return;
852 }
853
854 fuse_reply_readlink(req, dirent.symlink().c_str());
855 }
856
857
858 static void AddToDirListing(const fuse_req_t req,
859 const char *name, const struct stat *stat_info,
860 BigVector<char> *listing)
861 {
862 LogCvmfs(kLogCvmfs, kLogDebug, "Add to listing: %s, inode %" PRIu64,
863 name, uint64_t(stat_info->st_ino));
864 size_t remaining_size = listing->capacity() - listing->size();
865 const size_t entry_size = fuse_add_direntry(req, NULL, 0, name, stat_info, 0);
866
867 while (entry_size > remaining_size) {
868 listing->DoubleCapacity();
869 remaining_size = listing->capacity() - listing->size();
870 }
871
872 char *buffer;
873 bool large_alloc;
874 listing->ShareBuffer(&buffer, &large_alloc);
875 fuse_add_direntry(req, buffer + listing->size(),
876 remaining_size, name, stat_info,
877 listing->size() + entry_size);
878 listing->SetSize(listing->size() + entry_size);
879 }
880
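AddToDirListing() grows the shared listing buffer by doubling its capacity until the next entry fits and then appends the entry at the current size. The following self-contained sketch shows the same grow-and-append technique with plain realloc(); it is illustrative only and does not use the cvmfs BigVector class or fuse_add_direntry().

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// A listing buffer that doubles its capacity whenever an entry does not fit.
struct Listing {
  char *buffer;
  size_t size;
  size_t capacity;
};

void Append(Listing *l, const char *entry, size_t entry_size) {
  while (l->size + entry_size > l->capacity) {
    l->capacity = (l->capacity == 0) ? 64 : (l->capacity * 2);
    l->buffer = static_cast<char *>(realloc(l->buffer, l->capacity));
    assert(l->buffer != NULL);
  }
  memcpy(l->buffer + l->size, entry, entry_size);
  l->size += entry_size;
}

int main() {
  Listing listing = {NULL, 0, 0};
  const char *names[] = {".", "..", "file_a", "file_b"};
  for (unsigned i = 0; i < 4; ++i)
    Append(&listing, names[i], strlen(names[i]) + 1);  // include trailing '\0'
  printf("%zu bytes buffered, capacity %zu\n", listing.size, listing.capacity);
  free(listing.buffer);
  return 0;
}
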
881
882 /**
883 * Open a directory for listing.
884 */
885 static void cvmfs_opendir(fuse_req_t req, fuse_ino_t ino,
886 struct fuse_file_info *fi)
887 {
888 HighPrecisionTimer guard_timer(file_system_->hist_fs_opendir());
889
890 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
891 FuseInterruptCue ic(&req);
892 ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid, &ic);
893 fuse_remounter_->TryFinish();
894
895 fuse_remounter_->fence()->Enter();
896 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
897 ino = catalog_mgr->MangleInode(ino);
898 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_opendir on inode: %" PRIu64,
899 uint64_t(ino));
900 if (!CheckVoms(*fuse_ctx)) {
901 fuse_remounter_->fence()->Leave();
902 fuse_reply_err(req, EACCES);
903 return;
904 }
905
906 TraceInode(Tracer::kEventOpenDir, ino, "opendir()");
907 PathString path;
908 catalog::DirectoryEntry d;
909 bool found = GetPathForInode(ino, &path);
910 if (!found) {
911 fuse_remounter_->fence()->Leave();
912 fuse_reply_err(req, ENOENT);
913 return;
914 }
915 found = GetDirentForInode(ino, &d);
916
917 if (!found) {
918 fuse_remounter_->fence()->Leave();
919 ReplyNegative(d, req);
920 return;
921 }
922 if (!d.IsDirectory()) {
923 fuse_remounter_->fence()->Leave();
924 fuse_reply_err(req, ENOTDIR);
925 return;
926 }
927
928 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_opendir on inode: %" PRIu64 ", path %s",
929 uint64_t(ino), path.c_str());
930
931 // Build listing
932 BigVector<char> fuse_listing(512);
933
934 // Add current directory link
935 struct stat info;
936 info = d.GetStatStructure();
937 AddToDirListing(req, ".", &info, &fuse_listing);
938
939 // Add parent directory link
940 catalog::DirectoryEntry p;
941 if (d.inode() != catalog_mgr->GetRootInode() &&
942 (GetDirentForPath(GetParentPath(path), &p) > 0))
943 {
944 info = p.GetStatStructure();
945 AddToDirListing(req, "..", &info, &fuse_listing);
946 }
947
948 // Add all names
949 catalog::StatEntryList listing_from_catalog;
950 bool retval = catalog_mgr->ListingStat(path, &listing_from_catalog);
951
952 if (!retval) {
953 fuse_remounter_->fence()->Leave();
954 fuse_listing.Clear(); // Buffer is shared, empty manually
955
956 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
957 "EIO (03) on %s", path.c_str());
958 perf::Inc(file_system_->n_eio_total());
959 perf::Inc(file_system_->n_eio_03());
960 fuse_reply_err(req, EIO);
961 return;
962 }
963 for (unsigned i = 0; i < listing_from_catalog.size(); ++i) {
964 // Fix inodes
965 PathString entry_path;
966 entry_path.Assign(path);
967 entry_path.Append("/", 1);
968 entry_path.Append(listing_from_catalog.AtPtr(i)->name.GetChars(),
969 listing_from_catalog.AtPtr(i)->name.GetLength());
970
971 catalog::DirectoryEntry entry_dirent;
972 if (!GetDirentForPath(entry_path, &entry_dirent)) {
973 LogCvmfs(kLogCvmfs, kLogDebug, "listing entry %s vanished, skipping",
974 entry_path.c_str());
975 continue;
976 }
977
978 struct stat fixed_info = listing_from_catalog.AtPtr(i)->info;
979 fixed_info.st_ino = entry_dirent.inode();
980 AddToDirListing(req, listing_from_catalog.AtPtr(i)->name.c_str(),
981 &fixed_info, &fuse_listing);
982 }
983 fuse_remounter_->fence()->Leave();
984
985 DirectoryListing stream_listing;
986 stream_listing.size = fuse_listing.size();
987 stream_listing.capacity = fuse_listing.capacity();
988 bool large_alloc;
989 fuse_listing.ShareBuffer(&stream_listing.buffer, &large_alloc);
990 if (large_alloc)
991 stream_listing.capacity = 0;
992
993 // Save the directory listing and return a handle to the listing
994 {
995 MutexLockGuard m(&lock_directory_handles_);
996 LogCvmfs(kLogCvmfs, kLogDebug,
997 "linking directory handle %lu to dir inode: %" PRIu64,
998 next_directory_handle_, uint64_t(ino));
999 (*directory_handles_)[next_directory_handle_] = stream_listing;
1000 fi->fh = next_directory_handle_;
1001 ++next_directory_handle_;
1002 }
1003 perf::Inc(file_system_->n_fs_dir_open());
1004 perf::Inc(file_system_->no_open_dirs());
1005
1006 #if (FUSE_VERSION >= 30)
1007 #ifdef CVMFS_ENABLE_FUSE3_CACHE_READDIR
1008 // This affects only reads on the same open directory handle (e.g. multiple
1009 // reads with rewinddir() between them). A new opendir on the same directory
1010 // will trigger readdir calls independently of this setting.
1011 fi->cache_readdir = 1;
1012 #endif
1013 #endif
1014 fuse_reply_open(req, fi);
1015 }
1016
1017
1018 /**
1019 * Release a directory.
1020 */
1021 static void cvmfs_releasedir(fuse_req_t req, fuse_ino_t ino,
1022 struct fuse_file_info *fi)
1023 {
1024 HighPrecisionTimer guard_timer(file_system_->hist_fs_releasedir());
1025
1026 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1027 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_releasedir on inode %" PRIu64
1028 ", handle %lu", uint64_t(ino), fi->fh);
1029
1030 int reply = 0;
1031
1032 {
1033 MutexLockGuard m(&lock_directory_handles_);
1034 DirectoryHandles::iterator iter_handle = directory_handles_->find(fi->fh);
1035 if (iter_handle != directory_handles_->end()) {
1036 if (iter_handle->second.capacity == 0)
1037 smunmap(iter_handle->second.buffer);
1038 else
1039 free(iter_handle->second.buffer);
1040 directory_handles_->erase(iter_handle);
1041 perf::Dec(file_system_->no_open_dirs());
1042 } else {
1043 reply = EINVAL;
1044 }
1045 }
1046
1047 fuse_reply_err(req, reply);
1048 }
1049
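cvmfs_opendir() above stores the prepared listing under a monotonically increasing 64 bit handle in directory_handles_, and cvmfs_releasedir() erases it again, replying EINVAL for an unknown handle. A self-contained sketch of this handle-table pattern follows; std::map and std::mutex merely stand in for the dense_hash_map and the pthread mutex used in the real code.

#include <cstdint>
#include <cstdio>
#include <map>
#include <mutex>
#include <string>

namespace {
std::map<uint64_t, std::string> g_directory_handles;
std::mutex g_lock_directory_handles;
uint64_t g_next_directory_handle = 0;

// Stores the listing and hands back the handle that readdir/releasedir use.
uint64_t OpenListing(const std::string &listing) {
  std::lock_guard<std::mutex> guard(g_lock_directory_handles);
  const uint64_t handle = g_next_directory_handle++;
  g_directory_handles[handle] = listing;
  return handle;
}

// Returns false for unknown handles, mirroring the EINVAL reply above.
bool ReleaseListing(uint64_t handle) {
  std::lock_guard<std::mutex> guard(g_lock_directory_handles);
  return g_directory_handles.erase(handle) == 1;
}
}  // anonymous namespace

int main() {
  const uint64_t handle = OpenListing(". .. file_a file_b");
  std::printf("release known handle:   %s\n",
              ReleaseListing(handle) ? "ok" : "EINVAL");
  std::printf("release unknown handle: %s\n",
              ReleaseListing(42) ? "ok" : "EINVAL");
  return 0;
}
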
1050
1051 /**
1052 * Very large directory listings have to be sent in slices.
1053 */
1054 static void ReplyBufferSlice(const fuse_req_t req, const char *buffer,
1055 const size_t buffer_size, const off_t offset,
1056 const size_t max_size)
1057 {
1058 if (offset < static_cast<int>(buffer_size)) {
1059 fuse_reply_buf(req, buffer + offset,
1060 std::min(static_cast<size_t>(buffer_size - offset), max_size));
1061 } else {
1062 fuse_reply_buf(req, NULL, 0);
1063 }
1064 }
1065
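ReplyBufferSlice() serves the prepared listing in pieces: at most max_size bytes starting at the requested offset, and an empty reply once the offset has reached the end of the buffer. The arithmetic, as a standalone sketch (illustrative only, without the fuse reply calls):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <cstring>

// Copies the next slice into out and returns its length; 0 means "done".
size_t Slice(const char *buffer, size_t buffer_size, size_t offset,
             size_t max_size, char *out) {
  if (offset >= buffer_size)
    return 0;  // end of listing reached
  const size_t n = std::min(buffer_size - offset, max_size);
  memcpy(out, buffer + offset, n);
  return n;
}

int main() {
  const char listing[] = "0123456789";
  char out[4];
  size_t total = 0, n;
  while ((n = Slice(listing, sizeof(listing) - 1, total, sizeof(out), out)) > 0)
    total += n;
  printf("streamed %zu bytes in slices of up to %zu bytes\n",
         total, sizeof(out));
  return 0;
}
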
1066
1067 /**
1068 * Read the directory listing.
1069 */
1070 static void cvmfs_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
1071 off_t off, struct fuse_file_info *fi)
1072 {
1073 HighPrecisionTimer guard_timer(file_system_->hist_fs_readdir());
1074
1075 LogCvmfs(kLogCvmfs, kLogDebug,
1076 "cvmfs_readdir on inode %" PRIu64 " reading %lu bytes from offset %ld",
1077 static_cast<uint64_t>(mount_point_->catalog_mgr()->MangleInode(ino)),
1078 size, off);
1079
1080 DirectoryListing listing;
1081
1082 MutexLockGuard m(&lock_directory_handles_);
1083 DirectoryHandles::const_iterator iter_handle =
1084 directory_handles_->find(fi->fh);
1085 if (iter_handle != directory_handles_->end()) {
1086 listing = iter_handle->second;
1087
1088 ReplyBufferSlice(req, listing.buffer, listing.size, off, size);
1089 return;
1090 }
1091
1092 fuse_reply_err(req, EINVAL);
1093 }
1094
1095 static void FillOpenFlags(const glue::PageCacheTracker::OpenDirectives od,
1096 struct fuse_file_info *fi)
1097 {
1098 assert(!TestBit(glue::PageCacheTracker::kBitDirectIo, fi->fh));
1099 fi->keep_cache = od.keep_cache;
1100 fi->direct_io = od.direct_io;
1101 if (fi->direct_io)
1102 SetBit(glue::PageCacheTracker::kBitDirectIo, &fi->fh);
1103 }
1104
1105
1106 #ifdef __APPLE__
1107 // On macOS, xattr on a symlink opens and closes the file (with O_SYMLINK)
1108 // around the actual getxattr call. In order to not run into an I/O error
1109 // we use a special file handle for symlinks, from which one cannot read.
1110 static const uint64_t kFileHandleIgnore = static_cast<uint64_t>(2) << 60;
1111 #endif
1112
1113 /**
1114 * Open a file from cache. If necessary, file is downloaded first.
1115 *
1116 * \return Read-only file descriptor in fi->fh or kChunkedFileHandle for
1117 * chunked files
1118 */
1119 static void cvmfs_open(fuse_req_t req, fuse_ino_t ino,
1120 struct fuse_file_info *fi)
1121 {
1122 HighPrecisionTimer guard_timer(file_system_->hist_fs_open());
1123
1124 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1125 FuseInterruptCue ic(&req);
1126 ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid, &ic);
1127 fuse_remounter_->fence()->Enter();
1128 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1129 ino = catalog_mgr->MangleInode(ino);
1130 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_open on inode: %" PRIu64,
1131 uint64_t(ino));
1132
1133 int fd = -1;
1134 catalog::DirectoryEntry dirent;
1135 PathString path;
1136
1137 bool found = GetPathForInode(ino, &path);
1138 if (!found) {
1139 fuse_remounter_->fence()->Leave();
1140 fuse_reply_err(req, ENOENT);
1141 return;
1142 }
1143 found = GetDirentForInode(ino, &dirent);
1144 if (!found) {
1145 fuse_remounter_->fence()->Leave();
1146 ReplyNegative(dirent, req);
1147 return;
1148 }
1149
1150 if (!CheckVoms(*fuse_ctx)) {
1151 fuse_remounter_->fence()->Leave();
1152 fuse_reply_err(req, EACCES);
1153 return;
1154 }
1155
1156 mount_point_->tracer()->Trace(Tracer::kEventOpen, path, "open()");
1157 // Don't check. Either done by the OS or one wants to purposefully work
1158 // around wrong open flags
1159 // if ((fi->flags & 3) != O_RDONLY) {
1160 // fuse_reply_err(req, EROFS);
1161 // return;
1162 // }
1163 #ifdef __APPLE__
1164 if ((fi->flags & O_SHLOCK) || (fi->flags & O_EXLOCK)) {
1165 fuse_remounter_->fence()->Leave();
1166 fuse_reply_err(req, EOPNOTSUPP);
1167 return;
1168 }
1169 if (fi->flags & O_SYMLINK) {
1170 fuse_remounter_->fence()->Leave();
1171 fi->fh = kFileHandleIgnore;
1172 fuse_reply_open(req, fi);
1173 return;
1174 }
1175 #endif
1176 if (fi->flags & O_EXCL) {
1177 fuse_remounter_->fence()->Leave();
1178 fuse_reply_err(req, EEXIST);
1179 return;
1180 }
1181
1182 perf::Inc(file_system_->n_fs_open()); // Count actual open / fetch operations
1183
1184 glue::PageCacheTracker::OpenDirectives open_directives;
1185 if (!dirent.IsChunkedFile()) {
1186 if (dirent.IsDirectIo()) {
1187 open_directives = mount_point_->page_cache_tracker()->OpenDirect();
1188 } else {
1189 open_directives =
1190 mount_point_->page_cache_tracker()->Open(
1191 ino, dirent.checksum(), dirent.GetStatStructure());
1192 }
1193 fuse_remounter_->fence()->Leave();
1194 } else {
1195 LogCvmfs(kLogCvmfs, kLogDebug,
1196 "chunked file %s opened (download delayed to read() call)",
1197 path.c_str());
1198
1199 if (!IncAndCheckNoOpenFiles()) {
1200 perf::Dec(file_system_->no_open_files());
1201 fuse_remounter_->fence()->Leave();
1202 LogCvmfs(kLogCvmfs, kLogSyslogErr, "open file descriptor limit exceeded");
1203 fuse_reply_err(req, EMFILE);
1204 return;
1205 }
1206
1207 // Figure out unique inode from annotated catalog
1208 // TODO(jblomer): we only need to look up if the inode is not from the
1209 // current generation
1210 catalog::DirectoryEntry dirent_origin;
1211 if (!catalog_mgr->LookupPath(path, catalog::kLookupDefault,
1212 &dirent_origin)) {
1213 fuse_remounter_->fence()->Leave();
1214 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1215 "chunked file %s vanished unexpectedly", path.c_str());
1216 fuse_reply_err(req, ENOENT);
1217 return;
1218 }
1219 const uint64_t unique_inode = dirent_origin.inode();
1220
1221 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1222 chunk_tables->Lock();
1223 if (!chunk_tables->inode2chunks.Contains(unique_inode)) {
1224 chunk_tables->Unlock();
1225
1226 // Retrieve File chunks from the catalog
1227 UniquePtr<FileChunkList> chunks(new FileChunkList());
1228 if (!catalog_mgr->ListFileChunks(path, dirent.hash_algorithm(),
1229 chunks.weak_ref()) ||
1230 chunks->IsEmpty())
1231 {
1232 fuse_remounter_->fence()->Leave();
1233 LogCvmfs(kLogCvmfs, kLogDebug| kLogSyslogErr,
1234 "EIO (04) file %s is marked as 'chunked', but no chunks found.",
1235 path.c_str());
1236 perf::Inc(file_system_->n_eio_total());
1237 perf::Inc(file_system_->n_eio_04());
1238 fuse_reply_err(req, EIO);
1239 return;
1240 }
1241 fuse_remounter_->fence()->Leave();
1242
1243 chunk_tables->Lock();
1244 // Check again to avoid race
1245 if (!chunk_tables->inode2chunks.Contains(unique_inode)) {
1246 chunk_tables->inode2chunks.Insert(
1247 unique_inode, FileChunkReflist(chunks.Release(), path,
1248 dirent.compression_algorithm(),
1249 dirent.IsExternalFile()));
1250 chunk_tables->inode2references.Insert(unique_inode, 1);
1251 } else {
1252 uint32_t refctr;
1253 bool retval =
1254 chunk_tables->inode2references.Lookup(unique_inode, &refctr);
1255 assert(retval);
1256 chunk_tables->inode2references.Insert(unique_inode, refctr+1);
1257 }
1258 } else {
1259 fuse_remounter_->fence()->Leave();
1260 uint32_t refctr;
1261 bool retval =
1262 chunk_tables->inode2references.Lookup(unique_inode, &refctr);
1263 assert(retval);
1264 chunk_tables->inode2references.Insert(unique_inode, refctr+1);
1265 }
1266
1267 // Update the chunk handle list
1268 LogCvmfs(kLogCvmfs, kLogDebug,
1269 "linking chunk handle %lu to unique inode: %" PRIu64,
1270 chunk_tables->next_handle, uint64_t(unique_inode));
1271 chunk_tables->handle2fd.Insert(chunk_tables->next_handle, ChunkFd());
1272 chunk_tables->handle2uniqino.Insert(chunk_tables->next_handle,
1273 unique_inode);
1274
1275 // Generate artificial content hash as hash over chunk hashes
1276 // TODO(jblomer): we may want to cache the result in the chunk tables
1277 FileChunkReflist chunk_reflist;
1278 bool retval =
1279 chunk_tables->inode2chunks.Lookup(unique_inode, &chunk_reflist);
1280 assert(retval);
1281
1282 fi->fh = chunk_tables->next_handle;
1283 if (dirent.IsDirectIo()) {
1284 open_directives = mount_point_->page_cache_tracker()->OpenDirect();
1285 } else {
1286 open_directives = mount_point_->page_cache_tracker()->Open(
1287 ino, chunk_reflist.HashChunkList(), dirent.GetStatStructure());
1288 }
1289 FillOpenFlags(open_directives, fi);
1290 fi->fh = static_cast<uint64_t>(-static_cast<int64_t>(fi->fh));
1291 ++chunk_tables->next_handle;
1292 chunk_tables->Unlock();
1293
1294 fuse_reply_open(req, fi);
1295 return;
1296 }
1297
1298 Fetcher *this_fetcher = dirent.IsExternalFile()
1299 ? mount_point_->external_fetcher()
1300 : mount_point_->fetcher();
1301 CacheManager::Label label;
1302 label.path = path.ToString();
1303 label.size = dirent.size();
1304 label.zip_algorithm = dirent.compression_algorithm();
1305 if (mount_point_->catalog_mgr()->volatile_flag())
1306 label.flags |= CacheManager::kLabelVolatile;
1307 if (dirent.IsExternalFile())
1308 label.flags |= CacheManager::kLabelExternal;
1309 fd =
1310 this_fetcher->Fetch(CacheManager::LabeledObject(dirent.checksum(), label));
1311
1312 if (fd >= 0) {
1313 if (IncAndCheckNoOpenFiles()) {
1314 LogCvmfs(kLogCvmfs, kLogDebug, "file %s opened (fd %d)",
1315 path.c_str(), fd);
1316 fi->fh = fd;
1317 FillOpenFlags(open_directives, fi);
1318 fuse_reply_open(req, fi);
1319 return;
1320 } else {
1321 if (file_system_->cache_mgr()->Close(fd) == 0)
1322 perf::Dec(file_system_->no_open_files());
1323 LogCvmfs(kLogCvmfs, kLogSyslogErr, "open file descriptor limit exceeded");
1324 fuse_reply_err(req, EMFILE);
1325 return;
1326 }
1327 assert(false);
1328 }
1329
1330 // fd < 0
1331 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1332 "failed to open inode: %" PRIu64 ", CAS key %s, error code %d",
1333 uint64_t(ino), dirent.checksum().ToString().c_str(), errno);
1334 if (errno == EMFILE) {
1335 fuse_reply_err(req, EMFILE);
1336 return;
1337 }
1338
1339 mount_point_->backoff_throttle()->Throttle();
1340
1341 mount_point_->file_system()->io_error_info()->AddIoError();
1342 if (EIO == errno || EIO == -fd) {
1343 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1344 "EIO (06) on %s", path.c_str() );
1345 perf::Inc(file_system_->n_eio_total());
1346 perf::Inc(file_system_->n_eio_06());
1347 }
1348
1349 fuse_reply_err(req, -fd);
1350 }
1351
1352
1353 /**
1354 * Redirected to pread into cache.
1355 */
1356 static void cvmfs_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1357 struct fuse_file_info *fi)
1358 {
1359 HighPrecisionTimer guard_timer(file_system_->hist_fs_read());
1360
1361 LogCvmfs(kLogCvmfs, kLogDebug,
1362 "cvmfs_read inode: %" PRIu64 " reading %lu bytes from offset %ld "
1363 "fd %lu", uint64_t(mount_point_->catalog_mgr()->MangleInode(ino)),
1364 size, off, fi->fh);
1365 perf::Inc(file_system_->n_fs_read());
1366
1367 #ifdef __APPLE__
1368 if (fi->fh == kFileHandleIgnore) {
1369 fuse_reply_err(req, EBADF);
1370 return;
1371 }
1372 #endif
1373
1374 // Get data chunk (<=128k guaranteed by Fuse)
1375 char *data = static_cast<char *>(alloca(size));
1376 unsigned int overall_bytes_fetched = 0;
1377
1378 int64_t fd = static_cast<int64_t>(fi->fh);
1379 uint64_t abs_fd = (fd < 0) ? -fd : fd;
1380 ClearBit(glue::PageCacheTracker::kBitDirectIo, &abs_fd);
1381
1382 // Do we have a chunked file?
1383 if (fd < 0) {
1384 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1385 FuseInterruptCue ic(&req);
1386 ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid, &ic);
1387
1388 const uint64_t chunk_handle = abs_fd;
1389 uint64_t unique_inode;
1390 ChunkFd chunk_fd;
1391 FileChunkReflist chunks;
1392 bool retval;
1393
1394 // Fetch unique inode, chunk list and file descriptor
1395 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1396 chunk_tables->Lock();
1397 retval = chunk_tables->handle2uniqino.Lookup(chunk_handle, &unique_inode);
1398 if (!retval) {
1399 LogCvmfs(kLogCvmfs, kLogDebug, "no unique inode, fall back to fuse ino");
1400 unique_inode = ino;
1401 }
1402 retval = chunk_tables->inode2chunks.Lookup(unique_inode, &chunks);
1403 assert(retval);
1404 chunk_tables->Unlock();
1405
1406 unsigned chunk_idx = chunks.FindChunkIdx(off);
1407
1408 // Lock chunk handle
1409 pthread_mutex_t *handle_lock = chunk_tables->Handle2Lock(chunk_handle);
1410 MutexLockGuard m(handle_lock);
1411 chunk_tables->Lock();
1412 retval = chunk_tables->handle2fd.Lookup(chunk_handle, &chunk_fd);
1413 assert(retval);
1414 chunk_tables->Unlock();
1415
1416 // Fetch all needed chunks and read the requested data
1417 off_t offset_in_chunk = off - chunks.list->AtPtr(chunk_idx)->offset();
1418 do {
1419 // Open file descriptor to chunk
1420 if ((chunk_fd.fd == -1) || (chunk_fd.chunk_idx != chunk_idx)) {
1421 if (chunk_fd.fd != -1) file_system_->cache_mgr()->Close(chunk_fd.fd);
1422 Fetcher *this_fetcher = chunks.external_data
1423 ? mount_point_->external_fetcher()
1424 : mount_point_->fetcher();
1425 CacheManager::Label label;
1426 label.path = chunks.path.ToString();
1427 label.size = chunks.list->AtPtr(chunk_idx)->size();
1428 label.zip_algorithm = chunks.compression_alg;
1429 label.flags |= CacheManager::kLabelChunked;
1430 if (mount_point_->catalog_mgr()->volatile_flag())
1431 label.flags |= CacheManager::kLabelVolatile;
1432 if (chunks.external_data) {
1433 label.flags |= CacheManager::kLabelExternal;
1434 label.range_offset = chunks.list->AtPtr(chunk_idx)->offset();
1435 }
1436 chunk_fd.fd = this_fetcher->Fetch(CacheManager::LabeledObject(
1437 chunks.list->AtPtr(chunk_idx)->content_hash(), label));
1438 if (chunk_fd.fd < 0) {
1439 chunk_fd.fd = -1;
1440 chunk_tables->Lock();
1441 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1442 chunk_tables->Unlock();
1443
1444 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1445 "EIO (05) on %s", chunks.path.ToString().c_str() );
1446 perf::Inc(file_system_->n_eio_total());
1447 perf::Inc(file_system_->n_eio_05());
1448 fuse_reply_err(req, EIO);
1449 return;
1450 }
1451 chunk_fd.chunk_idx = chunk_idx;
1452 }
1453
1454 LogCvmfs(kLogCvmfs, kLogDebug, "reading from chunk fd %d",
1455 chunk_fd.fd);
1456 // Read data from chunk
1457 const size_t bytes_to_read = size - overall_bytes_fetched;
1458 const size_t remaining_bytes_in_chunk =
1459 chunks.list->AtPtr(chunk_idx)->size() - offset_in_chunk;
1460 size_t bytes_to_read_in_chunk =
1461 std::min(bytes_to_read, remaining_bytes_in_chunk);
1462 const int64_t bytes_fetched = file_system_->cache_mgr()->Pread(
1463 chunk_fd.fd,
1464 data + overall_bytes_fetched,
1465 bytes_to_read_in_chunk,
1466 offset_in_chunk);
1467
1468 if (bytes_fetched < 0) {
1469 LogCvmfs(kLogCvmfs, kLogSyslogErr, "read err no %" PRId64 " (%s)",
1470 bytes_fetched, chunks.path.ToString().c_str());
1471 chunk_tables->Lock();
1472 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1473 chunk_tables->Unlock();
1474 if ( EIO == errno || EIO == -bytes_fetched ) {
1475 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1476 "EIO (07) on %s", chunks.path.ToString().c_str() );
1477 perf::Inc(file_system_->n_eio_total());
1478 perf::Inc(file_system_->n_eio_07());
1479 }
1480 fuse_reply_err(req, -bytes_fetched);
1481 return;
1482 }
1483 overall_bytes_fetched += bytes_fetched;
1484
1485 // Proceed to the next chunk to keep on reading data
1486 ++chunk_idx;
1487 offset_in_chunk = 0;
1488 } while ((overall_bytes_fetched < size) &&
1489 (chunk_idx < chunks.list->size()));
1490
1491 // Update chunk file descriptor
1492 chunk_tables->Lock();
1493 chunk_tables->handle2fd.Insert(chunk_handle, chunk_fd);
1494 chunk_tables->Unlock();
1495 LogCvmfs(kLogCvmfs, kLogDebug, "released chunk file descriptor %d",
1496 chunk_fd.fd);
1497 } else {
1498 int64_t nbytes = file_system_->cache_mgr()->Pread(abs_fd, data, size, off);
1499 if (nbytes < 0) {
1500 if ( EIO == errno || EIO == -nbytes ) {
1501 PathString path;
1502 bool found = GetPathForInode(ino, &path);
1503 if ( found ) {
1504 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1505 "EIO (08) on %s", path.ToString().c_str() );
1506 } else {
1507 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
1508 "EIO (08) on <unknown inode>");
1509 }
1510 perf::Inc(file_system_->n_eio_total());
1511 perf::Inc(file_system_->n_eio_08());
1512 }
1513 fuse_reply_err(req, -nbytes);
1514 return;
1515 }
1516 overall_bytes_fetched = nbytes;
1517 }
1518
1519 // Push it to user
1520 fuse_reply_buf(req, data, overall_bytes_fetched);
1521 LogCvmfs(kLogCvmfs, kLogDebug, "pushed %d bytes to user",
1522 overall_bytes_fetched);
1523 }
1524
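For chunked files, cvmfs_read() above locates the chunk containing the requested offset (FileChunkReflist::FindChunkIdx(), declared in file_chunk.h), reads min(bytes still wanted, bytes left in the chunk), and moves on to the next chunk until the request is satisfied. The offset arithmetic, as a standalone sketch with invented chunk sizes and without any cache or fetcher involvement:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Chunk {
  uint64_t offset;  // first byte of the chunk within the file
  uint64_t size;
};

// Last chunk whose start offset is <= off (binary search over sorted offsets).
size_t FindChunkIdx(const std::vector<Chunk> &chunks, uint64_t off) {
  size_t lo = 0, hi = chunks.size();
  while (lo + 1 < hi) {
    const size_t mid = (lo + hi) / 2;
    if (chunks[mid].offset <= off) lo = mid; else hi = mid;
  }
  return lo;
}

int main() {
  std::vector<Chunk> chunks;
  for (uint64_t i = 0; i < 5; ++i) {
    const Chunk c = {i * 100, 100};  // five 100-byte chunks (example values)
    chunks.push_back(c);
  }

  const uint64_t off = 250, size = 180;
  uint64_t fetched = 0;
  size_t idx = FindChunkIdx(chunks, off);
  uint64_t offset_in_chunk = off - chunks[idx].offset;
  while (fetched < size && idx < chunks.size()) {
    const uint64_t n =
        std::min(size - fetched, chunks[idx].size - offset_in_chunk);
    std::printf("read %llu bytes from chunk %zu at in-chunk offset %llu\n",
                static_cast<unsigned long long>(n), idx,
                static_cast<unsigned long long>(offset_in_chunk));
    fetched += n;
    ++idx;
    offset_in_chunk = 0;  // subsequent chunks are read from their start
  }
  return 0;
}
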
1525
1526 /**
1527 * File close operation, redirected into cache.
1528 */
1529 static void cvmfs_release(fuse_req_t req, fuse_ino_t ino,
1530 struct fuse_file_info *fi)
1531 {
1532 HighPrecisionTimer guard_timer(file_system_->hist_fs_release());
1533
1534 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1535 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_release on inode: %" PRIu64,
1536 uint64_t(ino));
1537
1538 #ifdef __APPLE__
1539 if (fi->fh == kFileHandleIgnore) {
1540 fuse_reply_err(req, 0);
1541 return;
1542 }
1543 #endif
1544
1545 int64_t fd = static_cast<int64_t>(fi->fh);
1546 uint64_t abs_fd = (fd < 0) ? -fd : fd;
1547 if (!TestBit(glue::PageCacheTracker::kBitDirectIo, abs_fd)) {
1548 mount_point_->page_cache_tracker()->Close(ino);
1549 }
1550 ClearBit(glue::PageCacheTracker::kBitDirectIo, &abs_fd);
1551
1552 // do we have a chunked file?
1553 if (fd < 0) {
1554 const uint64_t chunk_handle = abs_fd;
1555 LogCvmfs(kLogCvmfs, kLogDebug, "releasing chunk handle %" PRIu64,
1556 chunk_handle);
1557 uint64_t unique_inode;
1558 ChunkFd chunk_fd;
1559 FileChunkReflist chunks;
1560 uint32_t refctr;
1561 bool retval;
1562
1563 ChunkTables *chunk_tables = mount_point_->chunk_tables();
1564 chunk_tables->Lock();
1565 retval = chunk_tables->handle2uniqino.Lookup(chunk_handle, &unique_inode);
1566 if (!retval) {
1567 LogCvmfs(kLogCvmfs, kLogDebug, "no unique inode, fall back to fuse ino");
1568 unique_inode = ino;
1569 } else {
1570 chunk_tables->handle2uniqino.Erase(chunk_handle);
1571 }
1572 retval = chunk_tables->handle2fd.Lookup(chunk_handle, &chunk_fd);
1573 assert(retval);
1574 chunk_tables->handle2fd.Erase(chunk_handle);
1575
1576 retval = chunk_tables->inode2references.Lookup(unique_inode, &refctr);
1577 assert(retval);
1578 refctr--;
1579 if (refctr == 0) {
1580 LogCvmfs(kLogCvmfs, kLogDebug, "releasing chunk list for inode %" PRIu64,
1581 uint64_t(unique_inode));
1582 FileChunkReflist to_delete;
1583 retval = chunk_tables->inode2chunks.Lookup(unique_inode, &to_delete);
1584 assert(retval);
1585 chunk_tables->inode2references.Erase(unique_inode);
1586 chunk_tables->inode2chunks.Erase(unique_inode);
1587 delete to_delete.list;
1588 } else {
1589 chunk_tables->inode2references.Insert(unique_inode, refctr);
1590 }
1591 chunk_tables->Unlock();
1592
1593 if (chunk_fd.fd != -1)
1594 file_system_->cache_mgr()->Close(chunk_fd.fd);
1595 perf::Dec(file_system_->no_open_files());
1596 } else {
1597 if (file_system_->cache_mgr()->Close(abs_fd) == 0) {
1598 perf::Dec(file_system_->no_open_files());
1599 }
1600 }
1601 fuse_reply_err(req, 0);
1602 }
1603
1604 /**
1605 * Returns information about a mounted filesystem. In this case it returns
1606 * information about the local cache occupancy of cvmfs.
1607 *
1608 * Note: If the elements of the struct statvfs *info are set to 0, the
1609 * mount is ignored by the command-line tool "df".
1610 */
1611 static void cvmfs_statfs(fuse_req_t req, fuse_ino_t ino) {
1612 ino = mount_point_->catalog_mgr()->MangleInode(ino);
1613 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_statfs on inode: %" PRIu64,
1614 uint64_t(ino));
1615
1616 TraceInode(Tracer::kEventStatFs, ino, "statfs()");
1617
1618 perf::Inc(file_system_->n_fs_statfs());
1619
1620 // Unmanaged cache (no lock needed - statfs is never modified)
1621 if (!file_system_->cache_mgr()->quota_mgr()->HasCapability(
1622 QuotaManager::kCapIntrospectSize))
1623 {
1624 LogCvmfs(kLogCvmfs, kLogDebug, "QuotaManager does not support statfs");
1625 fuse_reply_statfs(req, (mount_point_->statfs_cache()->info()));
1626 return;
1627 }
1628
1629 MutexLockGuard m(mount_point_->statfs_cache()->lock());
1630
1631 const uint64_t deadline = *mount_point_->statfs_cache()->expiry_deadline();
1632 struct statvfs *info = mount_point_->statfs_cache()->info();
1633
1634 // cached version still valid
1635 if ( platform_monotonic_time() < deadline ) {
1636 perf::Inc(file_system_->n_fs_statfs_cached());
1637 fuse_reply_statfs(req, info);
1638 return;
1639 }
1640
1641 uint64_t available = 0;
1642 uint64_t size = file_system_->cache_mgr()->quota_mgr()->GetSize();
1643 uint64_t capacity = file_system_->cache_mgr()->quota_mgr()->GetCapacity();
1644 // Fuse/OS X doesn't like values < 512
1645 info->f_bsize = info->f_frsize = 512;
1646
1647 if (capacity == (uint64_t)(-1)) {
1648 // Unknown capacity, set capacity = size
1649 info->f_blocks = size / info->f_bsize;
1650 } else {
1651 // Take values from LRU module
1652 info->f_blocks = capacity / info->f_bsize;
1653 available = capacity - size;
1654 }
1655
1656 info->f_bfree = info->f_bavail = available / info->f_bsize;
1657
1658 // Inodes / entries
1659 fuse_remounter_->fence()->Enter();
1660 uint64_t all_inodes = mount_point_->catalog_mgr()->all_inodes();
1661 uint64_t loaded_inode = mount_point_->catalog_mgr()->loaded_inodes();
1662 info->f_files = all_inodes;
1663 info->f_ffree = info->f_favail = all_inodes - loaded_inode;
1664 fuse_remounter_->fence()->Leave();
1665
1666 *mount_point_->statfs_cache()->expiry_deadline() =
1667 platform_monotonic_time()
1668 + mount_point_->statfs_cache()->cache_timeout();
1669
1670 fuse_reply_statfs(req, info);
1671 }
1672
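cvmfs_statfs() above converts the quota manager's current size and capacity into 512-byte blocks; an unknown capacity ((uint64_t)(-1)) is reported with the total set to the used size and nothing free, so "df" shows a completely full volume. The block arithmetic, as a standalone sketch; the capacity and size values below are invented for the example.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kBlockSize = 512;     // Fuse/OS X doesn't like values < 512
  const uint64_t capacity = 4ULL << 30;  // 4 GiB cache quota (example value)
  const uint64_t size     = 1ULL << 30;  // 1 GiB currently used (example value)

  uint64_t blocks, available;
  if (capacity == static_cast<uint64_t>(-1)) {
    // Unknown capacity: report the used size as the total, nothing free
    blocks = size / kBlockSize;
    available = 0;
  } else {
    blocks = capacity / kBlockSize;
    available = capacity - size;
  }
  std::printf("f_blocks=%llu f_bfree=%llu\n",
              static_cast<unsigned long long>(blocks),
              static_cast<unsigned long long>(available / kBlockSize));
  return 0;
}
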
1673 #ifdef __APPLE__
1674 static void cvmfs_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
1675 size_t size, uint32_t position)
1676 #else
1677 static void cvmfs_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name,
1678 size_t size)
1679 #endif
1680 {
1681 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1682 FuseInterruptCue ic(&req);
1683 ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid, &ic);
1684
1685 fuse_remounter_->fence()->Enter();
1686 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1687 ino = catalog_mgr->MangleInode(ino);
1688 LogCvmfs(kLogCvmfs, kLogDebug,
1689 "cvmfs_getxattr on inode: %" PRIu64 " for xattr: %s",
1690 uint64_t(ino), name);
1691 if (!CheckVoms(*fuse_ctx)) {
1692 fuse_remounter_->fence()->Leave();
1693 fuse_reply_err(req, EACCES);
1694 return;
1695 }
1696 TraceInode(Tracer::kEventGetXAttr, ino, "getxattr()");
1697
1698 vector<string> tokens_mode_machine = SplitString(name, '~');
1699 vector<string> tokens_mode_human = SplitString(name, '@');
1700
1701 int32_t attr_req_page = 0;
1702 MagicXattrMode xattr_mode = kXattrMachineMode;
1703 string attr;
1704
1705 bool attr_req_is_valid = false;
1706 const sanitizer::PositiveIntegerSanitizer page_num_sanitizer;
1707
1708 if (tokens_mode_human.size() > 1) {
1709 const std::string token = tokens_mode_human[tokens_mode_human.size() - 1];
1710 if (token == "?") {
1711 attr_req_is_valid = true;
1712 attr_req_page = -1;
1713 } else {
1714 if (page_num_sanitizer.IsValid(token)) {
1715 attr_req_is_valid = true;
1716 attr_req_page = static_cast<int32_t>(String2Uint64(token));
1717 }
1718 }
1719 xattr_mode = kXattrHumanMode;
1720 attr = tokens_mode_human[0];
1721 } else if (tokens_mode_machine.size() > 1) {
1722 const std::string token =
1723 tokens_mode_machine[tokens_mode_machine.size() - 1];
1724 if (token == "?") {
1725 attr_req_is_valid = true;
1726 attr_req_page = -1;
1727 } else {
1728 if (page_num_sanitizer.IsValid(token)) {
1729 attr_req_is_valid = true;
1730 attr_req_page = static_cast<int32_t>(String2Uint64(token));
1731 }
1732 }
1733 xattr_mode = kXattrMachineMode;
1734 attr = tokens_mode_machine[0];
1735
1736 } else {
1737 attr_req_is_valid = true;
1738 attr = tokens_mode_machine[0];
1739 }
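/*
 * Illustrative examples for the attribute name parsing above. "user.expires"
 * is one of the magic xattrs registered later in this file; the page suffixes
 * are hypothetical:
 *
 *   "user.expires"     -> machine mode, page 0 (default)
 *   "user.expires~2"   -> machine mode, page 2
 *   "user.expires@1"   -> human mode, page 1
 *   "user.expires@?"   -> human mode, page index -1 (request page information)
 *   "user.expires@abc" -> attr_req_is_valid stays false, rejected below
 *                         with ENODATA
 */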
1740
1741 if (!attr_req_is_valid) {
1742 fuse_remounter_->fence()->Leave();
1743 fuse_reply_err(req, ENODATA);
1744 return;
1745 }
1746
1747 catalog::DirectoryEntry d;
1748 const bool found = GetDirentForInode(ino, &d);
1749
1750 if (!found) {
1751 fuse_remounter_->fence()->Leave();
1752 ReplyNegative(d, req);
1753 return;
1754 }
1755
1756 bool retval;
1757 XattrList xattrs;
1758 PathString path;
1759 retval = GetPathForInode(ino, &path);
1760
1761 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1762 "cvmfs_statfs: Race condition? "
1763 "GetPathForInode did not succeed for path %s "
1764 "(path might have not been set)",
1765 path.c_str())) {
1766 fuse_remounter_->fence()->Leave();
1767 fuse_reply_err(req, ESTALE);
1768 return;
1769 }
1770
1771 if (d.IsLink()) {
1772 catalog::LookupOptions lookup_options = static_cast<catalog::LookupOptions>(
1773 catalog::kLookupDefault | catalog::kLookupRawSymlink);
1774 catalog::DirectoryEntry raw_symlink;
1775 retval = catalog_mgr->LookupPath(path, lookup_options, &raw_symlink);
1776
1777 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1778 "cvmfs_statfs: Race condition? "
1779 "LookupPath did not succeed for path %s",
1780 path.c_str())) {
1781 fuse_remounter_->fence()->Leave();
1782 fuse_reply_err(req, ESTALE);
1783 return;
1784 }
1785
1786 d.set_symlink(raw_symlink.symlink());
1787 }
1788 if (d.HasXattrs()) {
1789 retval = catalog_mgr->LookupXattrs(path, &xattrs);
1790
1791 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1792 "cvmfs_statfs: Race condition? "
1793 "LookupXattrs did not succeed for path %s",
1794 path.c_str())) {
1795 fuse_remounter_->fence()->Leave();
1796 fuse_reply_err(req, ESTALE);
1797 return;
1798 }
1799 }
1800
1801 bool magic_xattr_success = true;
1802 MagicXattrRAIIWrapper magic_xattr(mount_point_->magic_xattr_mgr()->GetLocked(
1803 attr, path, &d));
1804 if (!magic_xattr.IsNull()) {
1805 magic_xattr_success = magic_xattr->
1806 PrepareValueFencedProtected(fuse_ctx->gid);
1807 }
1808
1809 fuse_remounter_->fence()->Leave();
1810
1811 if (!magic_xattr_success) {
1812 fuse_reply_err(req, ENOATTR);
1813 return;
1814 }
1815
1816 std::pair<bool, std::string> attribute_result;
1817
1818 if (!magic_xattr.IsNull()) {
1819 attribute_result = magic_xattr->GetValue(attr_req_page, xattr_mode);
1820 } else {
1821 if (!xattrs.Get(attr, &attribute_result.second)) {
1822 fuse_reply_err(req, ENOATTR);
1823 return;
1824 }
1825 attribute_result.first = true;
1826 }
1827
1828 if (!attribute_result.first) {
1829 fuse_reply_err(req, ENODATA);
1830 } else if (size == 0) {
1831 fuse_reply_xattr(req, attribute_result.second.length());
1832 } else if (size >= attribute_result.second.length()) {
1833 fuse_reply_buf(req, &attribute_result.second[0],
1834 attribute_result.second.length());
1835 } else {
1836 fuse_reply_err(req, ERANGE);
1837 }
1838 }
1839
1840
1841 static void cvmfs_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size) {
1842 const struct fuse_ctx *fuse_ctx = fuse_req_ctx(req);
1843 FuseInterruptCue ic(&req);
1844 ClientCtxGuard ctx_guard(fuse_ctx->uid, fuse_ctx->gid, fuse_ctx->pid, &ic);
1845
1846 fuse_remounter_->fence()->Enter();
1847 catalog::ClientCatalogManager *catalog_mgr = mount_point_->catalog_mgr();
1848 ino = catalog_mgr->MangleInode(ino);
1849 TraceInode(Tracer::kEventListAttr, ino, "listxattr()");
1850 LogCvmfs(kLogCvmfs, kLogDebug,
1851 "cvmfs_listxattr on inode: %" PRIu64 ", size %zu [visibility %d]",
1852 uint64_t(ino), size,
1853 mount_point_->magic_xattr_mgr()->visibility());
1854
1855 catalog::DirectoryEntry d;
1856 const bool found = GetDirentForInode(ino, &d);
1857 XattrList xattrs;
1858 if (d.HasXattrs()) {
1859 PathString path;
1860 bool retval = GetPathForInode(ino, &path);
1861
1862 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1863 "cvmfs_listxattr: Race condition? "
1864 "GetPathForInode did not succeed for ino %lu",
1865 ino)) {
1866 fuse_remounter_->fence()->Leave();
1867 fuse_reply_err(req, ESTALE);
1868 return;
1869 }
1870
1871 retval = catalog_mgr->LookupXattrs(path, &xattrs);
1872 if (!AssertOrLog(retval, kLogCvmfs, kLogSyslogWarn | kLogDebug,
1873 "cvmfs_listxattr: Race condition? "
1874 "LookupXattrs did not succeed for ino %lu",
1875 ino)) {
1876 fuse_remounter_->fence()->Leave();
1877 fuse_reply_err(req, ESTALE);
1878 return;
1879 }
1880 }
1881 fuse_remounter_->fence()->Leave();
1882
1883 if (!found) {
1884 ReplyNegative(d, req);
1885 return;
1886 }
1887
1888 string attribute_list;
1889 attribute_list = mount_point_->magic_xattr_mgr()->GetListString(&d);
1890 attribute_list += xattrs.ListKeysPosix(attribute_list);
1891
1892 if (size == 0) {
1893 fuse_reply_xattr(req, attribute_list.length());
1894 } else if (size >= attribute_list.length()) {
1895 if (attribute_list.empty())
1896 fuse_reply_buf(req, NULL, 0);
1897 else
1898 fuse_reply_buf(req, &attribute_list[0], attribute_list.length());
1899 } else {
1900 fuse_reply_err(req, ERANGE);
1901 }
1902 }
1903
1904 bool Evict(const string &path) {
1905 catalog::DirectoryEntry dirent;
1906 fuse_remounter_->fence()->Enter();
1907 const bool found = (GetDirentForPath(PathString(path), &dirent) > 0);
1908
1909 if (!found || !dirent.IsRegular()) {
1910 fuse_remounter_->fence()->Leave();
1911 return false;
1912 }
1913
1914 if (!dirent.IsChunkedFile()) {
1915 fuse_remounter_->fence()->Leave();
1916 } else {
1917 FileChunkList chunks;
1918 mount_point_->catalog_mgr()->ListFileChunks(
1919 PathString(path), dirent.hash_algorithm(), &chunks);
1920 fuse_remounter_->fence()->Leave();
1921 for (unsigned i = 0; i < chunks.size(); ++i) {
1922 file_system_->cache_mgr()->quota_mgr()
1923 ->Remove(chunks.AtPtr(i)->content_hash());
1924 }
1925 }
1926 file_system_->cache_mgr()->quota_mgr()->Remove(dirent.checksum());
1927 return true;
1928 }
1929
1930
1931 bool Pin(const string &path) {
1932 catalog::DirectoryEntry dirent;
1933 fuse_remounter_->fence()->Enter();
1934 const bool found = (GetDirentForPath(PathString(path), &dirent) > 0);
1935 if (!found || !dirent.IsRegular()) {
1936 fuse_remounter_->fence()->Leave();
1937 return false;
1938 }
1939
1940 Fetcher *this_fetcher = dirent.IsExternalFile()
1941 ? mount_point_->external_fetcher()
1942 : mount_point_->fetcher();
1943
1944 if (!dirent.IsChunkedFile()) {
1945 fuse_remounter_->fence()->Leave();
1946 } else {
1947 FileChunkList chunks;
1948 mount_point_->catalog_mgr()->ListFileChunks(
1949 PathString(path), dirent.hash_algorithm(), &chunks);
1950 fuse_remounter_->fence()->Leave();
1951 for (unsigned i = 0; i < chunks.size(); ++i) {
1952 bool retval =
1953 file_system_->cache_mgr()->quota_mgr()->Pin(
1954 chunks.AtPtr(i)->content_hash(),
1955 chunks.AtPtr(i)->size(),
1956 "Part of " + path,
1957 false);
1958 if (!retval)
1959 return false;
1960 int fd = -1;
1961 CacheManager::Label label;
1962 label.path = path;
1963 label.size = chunks.AtPtr(i)->size();
1964 label.zip_algorithm = dirent.compression_algorithm();
1965 label.flags |= CacheManager::kLabelPinned;
1966 label.flags |= CacheManager::kLabelChunked;
1967 if (dirent.IsExternalFile()) {
1968 label.flags |= CacheManager::kLabelExternal;
1969 label.range_offset = chunks.AtPtr(i)->offset();
1970 }
1971 fd = this_fetcher->Fetch(CacheManager::LabeledObject(
1972 chunks.AtPtr(i)->content_hash(), label));
1973 if (fd < 0) {
1974 return false;
1975 }
1976 file_system_->cache_mgr()->Close(fd);
1977 }
1978 return true;
1979 }
1980
1981 bool retval = file_system_->cache_mgr()->quota_mgr()->Pin(
1982 dirent.checksum(), dirent.size(), path, false);
1983 if (!retval)
1984 return false;
1985 CacheManager::Label label;
1986 label.flags = CacheManager::kLabelPinned;
1987 label.size = dirent.size();
1988 label.path = path;
1989 label.zip_algorithm = dirent.compression_algorithm();
1990 int fd = this_fetcher->Fetch(
1991 CacheManager::LabeledObject(dirent.checksum(), label));
1992 if (fd < 0) {
1993 return false;
1994 }
1995 file_system_->cache_mgr()->Close(fd);
1996 return true;
1997 }
1998
1999
2000 /**
2001 * Do after-daemon() initialization
2002 */
2003 static void cvmfs_init(void *userdata, struct fuse_conn_info *conn) {
2004 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_init");
2005
2006 // NFS support
2007 #ifdef CVMFS_NFS_SUPPORT
2008 conn->want |= FUSE_CAP_EXPORT_SUPPORT;
2009 #endif
2010
2011 if (mount_point_->enforce_acls()) {
2012 #ifdef FUSE_CAP_POSIX_ACL
2013 if ((conn->capable & FUSE_CAP_POSIX_ACL) == 0) {
2014 PANIC(kLogDebug | kLogSyslogErr,
2015 "FUSE: ACL support requested but missing fuse kernel support, "
2016 "aborting");
2017 }
2018 conn->want |= FUSE_CAP_POSIX_ACL;
2019 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslog, "enforcing ACLs");
2020 #else
2021 PANIC(kLogDebug | kLogSyslogErr,
2022 "FUSE: ACL support requested but not available in this version of "
2023 "libfuse %d, aborting", FUSE_VERSION);
2024 #endif
2025 }
2026
2027 if (mount_point_->cache_symlinks()) {
2028 #ifdef FUSE_CAP_CACHE_SYMLINKS
2029 if ((conn->capable & FUSE_CAP_CACHE_SYMLINKS) == FUSE_CAP_CACHE_SYMLINKS) {
2030 conn->want |= FUSE_CAP_CACHE_SYMLINKS;
2031 LogCvmfs(kLogCvmfs, kLogDebug, "FUSE: Enable symlink caching");
2032 #ifndef FUSE_CAP_EXPIRE_ONLY
2033 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2034 "FUSE: Symlink caching enabled but no support for fuse_expire_entry. "
2035 "Symlinks will be cached but mountpoints on top of symlinks will "
2036 "break! "
2037 "Current libfuse %d is too old; required: libfuse >= 3.16, "
2038 "kernel >= 6.2-rc1",
2039 FUSE_VERSION);
2040 #endif
2041 } else {
2042 mount_point_->DisableCacheSymlinks();
2043 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2044 "FUSE: Symlink caching requested but missing fuse kernel support, "
2045 "falling back to no caching");
2046 }
2047 #else
2048 mount_point_->DisableCacheSymlinks();
2049 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2050 "FUSE: Symlink caching requested but missing libfuse support, "
2051 "falling back to no caching. Current libfuse %d",
2052 FUSE_VERSION);
2053 #endif
2054 }
2055
2056 #ifdef FUSE_CAP_EXPIRE_ONLY
2057 if ((conn->capable & FUSE_CAP_EXPIRE_ONLY) == FUSE_CAP_EXPIRE_ONLY &&
2058 FUSE_VERSION >= FUSE_MAKE_VERSION(3, 16)) {
2059 mount_point_->EnableFuseExpireEntry();
2060 LogCvmfs(kLogCvmfs, kLogDebug, "FUSE: Enable fuse_expire_entry ");
2061 } else if (mount_point_->cache_symlinks()) {
2062 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2063 "FUSE: Symlink caching enabled but no support for fuse_expire_entry. "
2064 "Symlinks will be cached but mountpoints on top of symlinks will break! "
2065 "Current libfuse %d; required: libfuse >= 3.16, kernel >= 6.2-rc1",
2066 FUSE_VERSION);
2067 }
2068 #endif
2069 }
2070
2071 static void cvmfs_destroy(void *unused __attribute__((unused))) {
2072 // The debug log is already closed at this point
2073 LogCvmfs(kLogCvmfs, kLogDebug, "cvmfs_destroy");
2074 }
2075
2076 /**
2077 * Puts the callback functions into a single structure
2078 */
2079 static void SetCvmfsOperations(struct fuse_lowlevel_ops *cvmfs_operations) {
2080 memset(cvmfs_operations, 0, sizeof(*cvmfs_operations));
2081
2082 // Init/Fini
2083 cvmfs_operations->init = cvmfs_init;
2084 cvmfs_operations->destroy = cvmfs_destroy;
2085
2086 cvmfs_operations->lookup = cvmfs_lookup;
2087 cvmfs_operations->getattr = cvmfs_getattr;
2088 cvmfs_operations->readlink = cvmfs_readlink;
2089 cvmfs_operations->open = cvmfs_open;
2090 cvmfs_operations->read = cvmfs_read;
2091 cvmfs_operations->release = cvmfs_release;
2092 cvmfs_operations->opendir = cvmfs_opendir;
2093 cvmfs_operations->readdir = cvmfs_readdir;
2094 cvmfs_operations->releasedir = cvmfs_releasedir;
2095 cvmfs_operations->statfs = cvmfs_statfs;
2096 cvmfs_operations->getxattr = cvmfs_getxattr;
2097 cvmfs_operations->listxattr = cvmfs_listxattr;
2098 cvmfs_operations->forget = cvmfs_forget;
2099 #if (FUSE_VERSION >= 29)
2100 cvmfs_operations->forget_multi = cvmfs_forget_multi;
2101 #endif
2102 }
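/*
 * Illustrative sketch (the actual wiring happens in the cvmfs loader, outside
 * this file): how a fuse_lowlevel_ops structure filled by SetCvmfsOperations()
 * is typically handed to libfuse 3. The mount path and the argument handling
 * are hypothetical placeholders.
 *
 *   struct fuse_lowlevel_ops ops;
 *   cvmfs::SetCvmfsOperations(&ops);
 *   struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
 *   struct fuse_session *session =
 *     fuse_session_new(&args, &ops, sizeof(ops), NULL);
 *   if (session != NULL) {
 *     if (fuse_session_mount(session, "/cvmfs/example.cern.ch") == 0) {
 *       fuse_session_loop(session);
 *       fuse_session_unmount(session);
 *     }
 *     fuse_session_destroy(session);
 *   }
 */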
2103
2104 // Called by cvmfs_talk when switching into read-only cache mode
2105 void UnregisterQuotaListener() {
2106 if (cvmfs::unpin_listener_) {
2107 quota::UnregisterListener(cvmfs::unpin_listener_);
2108 cvmfs::unpin_listener_ = NULL;
2109 }
2110 if (cvmfs::watchdog_listener_) {
2111 quota::UnregisterListener(cvmfs::watchdog_listener_);
2112 cvmfs::watchdog_listener_ = NULL;
2113 }
2114 }
2115
2116 bool SendFuseFd(const std::string &socket_path) {
2117 int fuse_fd;
2118 #if (FUSE_VERSION >= 30)
2119 fuse_fd = fuse_session_fd(*reinterpret_cast<struct fuse_session**>(
2120 loader_exports_->fuse_channel_or_session));
2121 #else
2122 fuse_fd = fuse_chan_fd(*reinterpret_cast<struct fuse_chan**>(
2123 loader_exports_->fuse_channel_or_session));
2124 #endif
2125 assert(fuse_fd >= 0);
2126 int sock_fd = ConnectSocket(socket_path);
2127 if (sock_fd < 0) {
2128 LogCvmfs(kLogCvmfs, kLogDebug, "cannot connect to socket %s: %d",
2129 socket_path.c_str(), errno);
2130 return false;
2131 }
2132 bool retval = SendFd2Socket(sock_fd, fuse_fd);
2133 close(sock_fd);
2134 return retval;
2135 }
2136
2137 } // namespace cvmfs
2138
2139
2140 string *g_boot_error = NULL;
2141
2142 __attribute__((visibility("default")))
2143 loader::CvmfsExports *g_cvmfs_exports = NULL;
2144
2145 /**
2146 * Begin section of cvmfs.cc-specific magic extended attributes
2147 */
2148
2149 class ExpiresMagicXattr : public BaseMagicXattr {
2150 time_t catalogs_valid_until_;
2151
2152 virtual bool PrepareValueFenced() {
2153 catalogs_valid_until_ = cvmfs::fuse_remounter_->catalogs_valid_until();
2154 return true;
2155 }
2156
2157 virtual void FinalizeValue() {
2158 if (catalogs_valid_until_ == MountPoint::kIndefiniteDeadline) {
2159 result_pages_.push_back("never (fixed root catalog)");
2160 return;
2161 } else {
2162 time_t now = time(NULL);
2163 result_pages_.push_back(StringifyInt((catalogs_valid_until_ - now) / 60));
2164 }
2165 }
2166 };
2167
2168 class InodeMaxMagicXattr : public BaseMagicXattr {
2169 virtual void FinalizeValue() {
2170 result_pages_.push_back(StringifyInt(
2171 cvmfs::inode_generation_info_.inode_generation +
2172 xattr_mgr_->mount_point()->catalog_mgr()->inode_gauge()));
2173 }
2174 };
2175
2176 class MaxFdMagicXattr : public BaseMagicXattr {
2177 virtual void FinalizeValue() {
2178 result_pages_.push_back(StringifyInt(
2179 cvmfs::max_open_files_ - cvmfs::kNumReservedFd));
2180 }
2181 };
2182
2183 class PidMagicXattr : public BaseMagicXattr {
2184 virtual void FinalizeValue() {
2185 result_pages_.push_back(StringifyInt(cvmfs::pid_)); }
2186 };
2187
2188 class UptimeMagicXattr : public BaseMagicXattr {
2189 virtual void FinalizeValue() {
2190 time_t now = time(NULL);
2191 uint64_t uptime = now - cvmfs::loader_exports_->boot_time;
2192 result_pages_.push_back(StringifyUint(uptime / 60));
2193 }
2194 };
2195
2196 /**
2197 * Registers the cvmfs.cc-specific magic extended attributes with the mount
2198 * point's magic xattr manager
2199 */
2200 static void RegisterMagicXattrs() {
2201 MagicXattrManager *mgr = cvmfs::mount_point_->magic_xattr_mgr();
2202 mgr->Register("user.expires", new ExpiresMagicXattr());
2203 mgr->Register("user.inode_max", new InodeMaxMagicXattr());
2204 mgr->Register("user.pid", new PidMagicXattr());
2205 mgr->Register("user.maxfd", new MaxFdMagicXattr());
2206 mgr->Register("user.uptime", new UptimeMagicXattr());
2207
2208 mgr->Freeze();
2209 }
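/*
 * Illustrative sketch (not part of cvmfs): reading one of the magic xattrs
 * registered above from a mounted repository with getxattr(2), Linux
 * signature. The mount path "/cvmfs/example.cern.ch" is a hypothetical
 * placeholder; the "@<page>" / "@?" suffixes parsed in cvmfs_getxattr() can
 * be appended to the attribute name.
 *
 *   #include <sys/xattr.h>
 *   #include <cstdio>
 *
 *   int main() {
 *     char value[256];
 *     ssize_t len = getxattr("/cvmfs/example.cern.ch", "user.expires",
 *                            value, sizeof(value) - 1);
 *     if (len < 0)
 *       return 1;
 *     value[len] = '\0';
 *     printf("user.expires: %s\n", value);
 *     return 0;
 *   }
 */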
2210
2211 /**
2212 * Constructs a file system but prevents hanging when the repository is already
2213 * mounted. That means: at most one "system" mount per repository name.
2214 */
2215 static FileSystem *InitSystemFs(
2216 const string &mount_path,
2217 const string &fqrn,
2218 FileSystem::FileSystemInfo fs_info)
2219 {
2220 fs_info.wait_workspace = false;
2221 FileSystem *file_system = FileSystem::Create(fs_info);
2222
2223 if (file_system->boot_status() == loader::kFailLockWorkspace) {
2224 string fqrn_from_xattr;
2225 int retval = platform_getxattr(mount_path, "user.fqrn", &fqrn_from_xattr);
2226 if (!retval) {
2227 // CernVM-FS is not mounted anymore, but another cvmfs process is still
2228 // in its shutdown procedure. Try again and wait for the lock
2229 delete file_system;
2230 fs_info.wait_workspace = true;
2231 file_system = FileSystem::Create(fs_info);
2232 } else {
2233 if (fqrn_from_xattr == fqrn) {
2234 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogWarn,
2235 "repository already mounted on %s", mount_path.c_str());
2236 file_system->set_boot_status(loader::kFailDoubleMount);
2237 } else {
2238 LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslogErr,
2239 "CernVM-FS repository %s already mounted on %s",
2240 fqrn.c_str(), mount_path.c_str());
2241 file_system->set_boot_status(loader::kFailOtherMount);
2242 }
2243 }
2244 }
2245
2246 return file_system;
2247 }
2248
2249
2250 static void InitOptionsMgr(const loader::LoaderExports *loader_exports) {
2251 if (loader_exports->version >= 3 && loader_exports->simple_options_parsing) {
2252 cvmfs::options_mgr_ = new SimpleOptionsParser(
2253 new DefaultOptionsTemplateManager(loader_exports->repository_name));
2254 } else {
2255 cvmfs::options_mgr_ = new BashOptionsManager(
2256 new DefaultOptionsTemplateManager(loader_exports->repository_name));
2257 }
2258
2259 if (loader_exports->config_files != "") {
2260 vector<string> tokens = SplitString(loader_exports->config_files, ':');
2261 for (unsigned i = 0, s = tokens.size(); i < s; ++i) {
2262 cvmfs::options_mgr_->ParsePath(tokens[i], false);
2263 }
2264 } else {
2265 cvmfs::options_mgr_->ParseDefault(loader_exports->repository_name);
2266 }
2267 }
2268
2269
2270 static unsigned CheckMaxOpenFiles() {
2271 static unsigned max_open_files;
2272 static bool already_done = false;
2273
2274 // check number of open files (lazy evaluation)
2275 if (!already_done) {
2276 unsigned soft_limit = 0;
2277 unsigned hard_limit = 0;
2278 GetLimitNoFile(&soft_limit, &hard_limit);
2279
2280 if (soft_limit < cvmfs::kMinOpenFiles) {
2281 LogCvmfs(kLogCvmfs, kLogSyslogWarn | kLogDebug,
2282 "Warning: current limits for number of open files are "
2283 "(%u/%u)\n"
2284 "CernVM-FS is likely to run out of file descriptors, "
2285 "set ulimit -n to at least %u",
2286 soft_limit, hard_limit, cvmfs::kMinOpenFiles);
2287 }
2288 max_open_files = soft_limit;
2289 already_done = true;
2290 }
2291
2292 return max_open_files;
2293 }
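/*
 * Minimal sketch of the limit lookup used above, assuming GetLimitNoFile() is
 * a thin wrapper around POSIX getrlimit(2); the real cvmfs utility is defined
 * elsewhere in the code base and may differ.
 *
 *   #include <sys/resource.h>
 *
 *   static void GetLimitNoFileSketch(unsigned *soft_limit,
 *                                    unsigned *hard_limit) {
 *     struct rlimit rl;
 *     if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *       *soft_limit = static_cast<unsigned>(rl.rlim_cur);
 *       *hard_limit = static_cast<unsigned>(rl.rlim_max);
 *     } else {
 *       *soft_limit = *hard_limit = 0;
 *     }
 *   }
 */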
2294
2295
2296 static int Init(const loader::LoaderExports *loader_exports) {
2297 g_boot_error = new string("unknown error");
2298 cvmfs::loader_exports_ = loader_exports;
2299
2300 crypto::SetupLibcryptoMt();
2301
2302 InitOptionsMgr(loader_exports);
2303
2304 // We need logging set up before forking the watchdog
2305 FileSystem::SetupLoggingStandalone(
2306 *cvmfs::options_mgr_, loader_exports->repository_name);
2307
2308 // Monitor, check for maximum number of open files
2309 if (cvmfs::UseWatchdog()) {
2310 auto_umount::SetMountpoint(loader_exports->mount_point);
2311 cvmfs::watchdog_ = Watchdog::Create(auto_umount::UmountOnCrash);
2312 if (cvmfs::watchdog_ == NULL) {
2313 *g_boot_error = "failed to initialize watchdog.";
2314 return loader::kFailMonitor;
2315 }
2316 }
2317 cvmfs::max_open_files_ = CheckMaxOpenFiles();
2318
2319 FileSystem::FileSystemInfo fs_info;
2320 fs_info.type = FileSystem::kFsFuse;
2321 fs_info.name = loader_exports->repository_name;
2322 fs_info.exe_path = loader_exports->program_name;
2323 fs_info.options_mgr = cvmfs::options_mgr_;
2324 fs_info.foreground = loader_exports->foreground;
2325 cvmfs::file_system_ = InitSystemFs(
2326 loader_exports->mount_point,
2327 loader_exports->repository_name,
2328 fs_info);
2329 if (!cvmfs::file_system_->IsValid()) {
2330 *g_boot_error = cvmfs::file_system_->boot_error();
2331 return cvmfs::file_system_->boot_status();
2332 }
2333 if ((cvmfs::file_system_->cache_mgr()->id() == kPosixCacheManager) &&
2334 dynamic_cast<PosixCacheManager *>(
2335 cvmfs::file_system_->cache_mgr())->do_refcount())
2336 {
2337 cvmfs::check_fd_overflow_ = false;
2338 }
2339
2340 cvmfs::mount_point_ = MountPoint::Create(loader_exports->repository_name,
2341 cvmfs::file_system_);
2342 if (!cvmfs::mount_point_->IsValid()) {
2343 *g_boot_error = cvmfs::mount_point_->boot_error();
2344 return cvmfs::mount_point_->boot_status();
2345 }
2346
2347 RegisterMagicXattrs();
2348
2349 cvmfs::directory_handles_ = new cvmfs::DirectoryHandles();
2350 cvmfs::directory_handles_->set_empty_key((uint64_t)(-1));
2351 cvmfs::directory_handles_->set_deleted_key((uint64_t)(-2));
2352
2353 LogCvmfs(kLogCvmfs, kLogDebug, "fuse inode size is %lu bits",
2354 sizeof(fuse_ino_t) * 8);
2355
2356 cvmfs::inode_generation_info_.initial_revision =
2357 cvmfs::mount_point_->catalog_mgr()->GetRevision();
2358 cvmfs::inode_generation_info_.inode_generation =
2359 cvmfs::mount_point_->inode_annotation()->GetGeneration();
2360 LogCvmfs(kLogCvmfs, kLogDebug, "root inode is %" PRIu64,
2361 uint64_t(cvmfs::mount_point_->catalog_mgr()->GetRootInode()));
2362
2363 void **channel_or_session = NULL;
2364 if (loader_exports->version >= 4) {
2365 channel_or_session = loader_exports->fuse_channel_or_session;
2366 }
2367
2368 bool fuse_notify_invalidation = true;
2369 std::string buf;
2370 if (cvmfs::options_mgr_->GetValue("CVMFS_FUSE_NOTIFY_INVALIDATION",
2371 &buf)) {
2372 if (!cvmfs::options_mgr_->IsOn(buf)) {
2373 fuse_notify_invalidation = false;
2374 cvmfs::mount_point_->dentry_tracker()->Disable();
2375 }
2376 }
2377 cvmfs::fuse_remounter_ =
2378 new FuseRemounter(cvmfs::mount_point_, &cvmfs::inode_generation_info_,
2379 channel_or_session, fuse_notify_invalidation);
2380
2381 // Control & command interface
2382 cvmfs::talk_mgr_ = TalkManager::Create(
2383 cvmfs::mount_point_->talk_socket_path(),
2384 cvmfs::mount_point_,
2385 cvmfs::fuse_remounter_);
2386 if ((cvmfs::mount_point_->talk_socket_uid() != 0) ||
2387 (cvmfs::mount_point_->talk_socket_gid() != 0))
2388 {
2389 uid_t tgt_uid = cvmfs::mount_point_->talk_socket_uid();
2390 gid_t tgt_gid = cvmfs::mount_point_->talk_socket_gid();
2391 int rvi = chown(cvmfs::mount_point_->talk_socket_path().c_str(),
2392 tgt_uid, tgt_gid);
2393 if (rvi != 0) {
2394 *g_boot_error = std::string("failed to set talk socket ownership - ")
2395 + "target " + StringifyInt(tgt_uid) + ":" + StringifyInt(tgt_uid)
2396 + ", user " + StringifyInt(geteuid()) + ":" + StringifyInt(getegid());
2397 return loader::kFailTalk;
2398 }
2399 }
2400 if (cvmfs::talk_mgr_ == NULL) {
2401 *g_boot_error = "failed to initialize talk socket (" +
2402 StringifyInt(errno) + ")";
2403 return loader::kFailTalk;
2404 }
2405
2406 // Notification system client
2407 {
2408 OptionsManager* options = cvmfs::file_system_->options_mgr();
2409 if (options->IsDefined("CVMFS_NOTIFICATION_SERVER")) {
2410 std::string config;
2411 options->GetValue("CVMFS_NOTIFICATION_SERVER", &config);
2412 const std::string repo_name = cvmfs::mount_point_->fqrn();
2413 cvmfs::notification_client_ =
2414 new NotificationClient(config, repo_name, cvmfs::fuse_remounter_,
2415 cvmfs::mount_point_->download_mgr(),
2416 cvmfs::mount_point_->signature_mgr());
2417 }
2418 }
2419
2420 return loader::kFailOk;
2421 }
2422
2423
2424 /**
2425 * Things that have to be executed after fork() / daemon()
2426 */
2427 static void Spawn() {
2428 // First thing: kick off the watchdog while we still have a single-threaded
2429 // well-defined state
2430 cvmfs::pid_ = getpid();
2431 if (cvmfs::watchdog_) {
2432 cvmfs::watchdog_->Spawn(GetCurrentWorkingDirectory() + "/stacktrace." +
2433 cvmfs::mount_point_->fqrn());
2434 }
2435
2436 cvmfs::fuse_remounter_->Spawn();
2437 if (cvmfs::mount_point_->dentry_tracker()->is_active()) {
2438 cvmfs::mount_point_->dentry_tracker()->SpawnCleaner(
2439 // Usually every minute
2440 static_cast<unsigned int>(cvmfs::mount_point_->kcache_timeout_sec()));
2441 }
2442
2443 cvmfs::mount_point_->download_mgr()->Spawn();
2444 cvmfs::mount_point_->external_download_mgr()->Spawn();
2445 if (cvmfs::mount_point_->resolv_conf_watcher() != NULL) {
2446 cvmfs::mount_point_->resolv_conf_watcher()->Spawn();
2447 }
2448 QuotaManager *quota_mgr = cvmfs::file_system_->cache_mgr()->quota_mgr();
2449 quota_mgr->Spawn();
2450 if (quota_mgr->HasCapability(QuotaManager::kCapListeners)) {
2451 cvmfs::watchdog_listener_ = quota::RegisterWatchdogListener(
2452 quota_mgr,
2453 cvmfs::mount_point_->uuid()->uuid() + "-watchdog");
2454 cvmfs::unpin_listener_ = quota::RegisterUnpinListener(
2455 quota_mgr,
2456 cvmfs::mount_point_->catalog_mgr(),
2457 cvmfs::mount_point_->uuid()->uuid() + "-unpin");
2458 }
2459 cvmfs::mount_point_->tracer()->Spawn();
2460 cvmfs::talk_mgr_->Spawn();
2461
2462 if (cvmfs::notification_client_ != NULL) {
2463 cvmfs::notification_client_->Spawn();
2464 }
2465
2466 if (cvmfs::file_system_->nfs_maps() != NULL) {
2467 cvmfs::file_system_->nfs_maps()->Spawn();
2468 }
2469
2470 cvmfs::file_system_->cache_mgr()->Spawn();
2471
2472 if (cvmfs::mount_point_->telemetry_aggr() != NULL) {
2473 cvmfs::mount_point_->telemetry_aggr()->Spawn();
2474 }
2475 }
2476
2477
2478 static string GetErrorMsg() {
2479 if (g_boot_error)
2480 return *g_boot_error;
2481 return "";
2482 }
2483
2484
2485 /**
2486 * Called on its own at the end of SaveState; it performs a partial Fini(),
2487 * just enough to delete the catalog manager, so that no more open file handles
2488 * from file catalogs are active.
2489 */
2490 static void ShutdownMountpoint() {
2491 delete cvmfs::talk_mgr_;
2492 cvmfs::talk_mgr_ = NULL;
2493
2494 delete cvmfs::notification_client_;
2495 cvmfs::notification_client_ = NULL;
2496
2497 // The remounter has a reference to the mount point and the inode generation
2498 delete cvmfs::fuse_remounter_;
2499 cvmfs::fuse_remounter_ = NULL;
2500
2501 // The unpin listener requires the catalog, so this must be unregistered
2502 // before the catalog manager is removed
2503 if (cvmfs::unpin_listener_ != NULL) {
2504 quota::UnregisterListener(cvmfs::unpin_listener_);
2505 cvmfs::unpin_listener_ = NULL;
2506 }
2507 if (cvmfs::watchdog_listener_ != NULL) {
2508 quota::UnregisterListener(cvmfs::watchdog_listener_);
2509 cvmfs::watchdog_listener_ = NULL;
2510 }
2511
2512 delete cvmfs::directory_handles_;
2513 delete cvmfs::mount_point_;
2514 cvmfs::directory_handles_ = NULL;
2515 cvmfs::mount_point_ = NULL;
2516 }
2517
2518
2519 static void Fini() {
2520 ShutdownMountpoint();
2521
2522 delete cvmfs::file_system_;
2523 delete cvmfs::options_mgr_;
2524 cvmfs::file_system_ = NULL;
2525 cvmfs::options_mgr_ = NULL;
2526
2527 delete cvmfs::watchdog_;
2528 cvmfs::watchdog_ = NULL;
2529
2530 delete g_boot_error;
2531 g_boot_error = NULL;
2532 auto_umount::SetMountpoint("");
2533
2534 crypto::CleanupLibcryptoMt();
2535 }
2536
2537
2538 static int AltProcessFlavor(int argc, char **argv) {
2539 if (strcmp(argv[1], "__cachemgr__") == 0) {
2540 return PosixQuotaManager::MainCacheManager(argc, argv);
2541 }
2542 if (strcmp(argv[1], "__wpad__") == 0) {
2543 return download::MainResolveProxyDescription(argc, argv);
2544 }
2545 return 1;
2546 }
2547
2548
2549 static bool MaintenanceMode(const int fd_progress) {
2550 SendMsg2Socket(fd_progress, "Entering maintenance mode\n");
2551 string msg_progress = "Draining out kernel caches (";
2552 if (FuseInvalidator::HasFuseNotifyInval())
2553 msg_progress += "up to ";
2554 msg_progress += StringifyInt(static_cast<int>(
2555 cvmfs::mount_point_->kcache_timeout_sec())) +
2556 "s)\n";
2557 SendMsg2Socket(fd_progress, msg_progress);
2558 cvmfs::fuse_remounter_->EnterMaintenanceMode();
2559 return true;
2560 }
2561
2562
2563 static bool SaveState(const int fd_progress, loader::StateList *saved_states) {
2564 string msg_progress;
2565
2566 unsigned num_open_dirs = cvmfs::directory_handles_->size();
2567 if (num_open_dirs != 0) {
2568 #ifdef DEBUGMSG
2569 for (cvmfs::DirectoryHandles::iterator i =
2570 cvmfs::directory_handles_->begin(),
2571 iEnd = cvmfs::directory_handles_->end(); i != iEnd; ++i)
2572 {
2573 LogCvmfs(kLogCvmfs, kLogDebug, "saving dirhandle %lu", i->first);
2574 }
2575 #endif
2576
2577 msg_progress = "Saving open directory handles (" +
2578 StringifyInt(num_open_dirs) + " handles)\n";
2579 SendMsg2Socket(fd_progress, msg_progress);
2580
2581 // TODO(jblomer): should rather be saved just in a malloc'd memory block
2582 cvmfs::DirectoryHandles *saved_handles =
2583 new cvmfs::DirectoryHandles(*cvmfs::directory_handles_);
2584 loader::SavedState *save_open_dirs = new loader::SavedState();
2585 save_open_dirs->state_id = loader::kStateOpenDirs;
2586 save_open_dirs->state = saved_handles;
2587 saved_states->push_back(save_open_dirs);
2588 }
2589
2590 if (!cvmfs::file_system_->IsNfsSource()) {
2591 msg_progress = "Saving inode tracker\n";
2592 SendMsg2Socket(fd_progress, msg_progress);
2593 glue::InodeTracker *saved_inode_tracker =
2594 new glue::InodeTracker(*cvmfs::mount_point_->inode_tracker());
2595 loader::SavedState *state_glue_buffer = new loader::SavedState();
2596 state_glue_buffer->state_id = loader::kStateGlueBufferV4;
2597 state_glue_buffer->state = saved_inode_tracker;
2598 saved_states->push_back(state_glue_buffer);
2599 }
2600
2601 msg_progress = "Saving negative entry cache\n";
2602 SendMsg2Socket(fd_progress, msg_progress);
2603 glue::DentryTracker *saved_dentry_tracker =
2604 new glue::DentryTracker(*cvmfs::mount_point_->dentry_tracker());
2605 loader::SavedState *state_dentry_tracker = new loader::SavedState();
2606 state_dentry_tracker->state_id = loader::kStateDentryTracker;
2607 state_dentry_tracker->state = saved_dentry_tracker;
2608 saved_states->push_back(state_dentry_tracker);
2609
2610 msg_progress = "Saving page cache entry tracker\n";
2611 SendMsg2Socket(fd_progress, msg_progress);
2612 glue::PageCacheTracker *saved_page_cache_tracker =
2613 new glue::PageCacheTracker(*cvmfs::mount_point_->page_cache_tracker());
2614 loader::SavedState *state_page_cache_tracker = new loader::SavedState();
2615 state_page_cache_tracker->state_id = loader::kStatePageCacheTracker;
2616 state_page_cache_tracker->state = saved_page_cache_tracker;
2617 saved_states->push_back(state_page_cache_tracker);
2618
2619 msg_progress = "Saving chunk tables\n";
2620 SendMsg2Socket(fd_progress, msg_progress);
2621 ChunkTables *saved_chunk_tables = new ChunkTables(
2622 *cvmfs::mount_point_->chunk_tables());
2623 loader::SavedState *state_chunk_tables = new loader::SavedState();
2624 state_chunk_tables->state_id = loader::kStateOpenChunksV4;
2625 state_chunk_tables->state = saved_chunk_tables;
2626 saved_states->push_back(state_chunk_tables);
2627
2628 msg_progress = "Saving inode generation\n";
2629 SendMsg2Socket(fd_progress, msg_progress);
2630 cvmfs::inode_generation_info_.inode_generation +=
2631 cvmfs::mount_point_->catalog_mgr()->inode_gauge();
2632 cvmfs::InodeGenerationInfo *saved_inode_generation =
2633 new cvmfs::InodeGenerationInfo(cvmfs::inode_generation_info_);
2634 loader::SavedState *state_inode_generation = new loader::SavedState();
2635 state_inode_generation->state_id = loader::kStateInodeGeneration;
2636 state_inode_generation->state = saved_inode_generation;
2637 saved_states->push_back(state_inode_generation);
2638
2639 msg_progress = "Saving fuse state\n";
2640 SendMsg2Socket(fd_progress, msg_progress);
2641 cvmfs::FuseState *saved_fuse_state = new cvmfs::FuseState();
2642 saved_fuse_state->cache_symlinks = cvmfs::mount_point_->cache_symlinks();
2643 saved_fuse_state->has_dentry_expire =
2644 cvmfs::mount_point_->fuse_expire_entry();
2645 loader::SavedState *state_fuse = new loader::SavedState();
2646 state_fuse->state_id = loader::kStateFuse;
2647 state_fuse->state = saved_fuse_state;
2648 saved_states->push_back(state_fuse);
2649
2650 // Close open file catalogs
2651 ShutdownMountpoint();
2652
2653 loader::SavedState *state_cache_mgr = new loader::SavedState();
2654 state_cache_mgr->state_id = loader::kStateOpenFiles;
2655 state_cache_mgr->state =
2656 cvmfs::file_system_->cache_mgr()->SaveState(fd_progress);
2657 saved_states->push_back(state_cache_mgr);
2658
2659 msg_progress = "Saving open files counter\n";
2660 uint32_t *saved_num_fd =
2661 new uint32_t(cvmfs::file_system_->no_open_files()->Get());
2662 loader::SavedState *state_num_fd = new loader::SavedState();
2663 state_num_fd->state_id = loader::kStateOpenFilesCounter;
2664 state_num_fd->state = saved_num_fd;
2665 saved_states->push_back(state_num_fd);
2666
2667 return true;
2668 }
2669
2670
2671 static bool RestoreState(const int fd_progress,
2672 const loader::StateList &saved_states)
2673 {
2674 // If we have no saved version of the page cache tracker, it is unsafe
2675 // to start using it. The page cache tracker has to run for the entire
2676 // lifetime of the mountpoint or not at all.
2677 cvmfs::mount_point_->page_cache_tracker()->Disable();
2678
2679 for (unsigned i = 0, l = saved_states.size(); i < l; ++i) {
2680 if (saved_states[i]->state_id == loader::kStateOpenDirs) {
2681 SendMsg2Socket(fd_progress, "Restoring open directory handles... ");
2682 delete cvmfs::directory_handles_;
2683 cvmfs::DirectoryHandles *saved_handles =
2684 (cvmfs::DirectoryHandles *)saved_states[i]->state;
2685 cvmfs::directory_handles_ = new cvmfs::DirectoryHandles(*saved_handles);
2686 cvmfs::file_system_->no_open_dirs()->Set(
2687 cvmfs::directory_handles_->size());
2688 cvmfs::DirectoryHandles::const_iterator it =
2689 cvmfs::directory_handles_->begin();
2690 for (; it != cvmfs::directory_handles_->end(); ++it) {
2691 if (it->first >= cvmfs::next_directory_handle_)
2692 cvmfs::next_directory_handle_ = it->first + 1;
2693 }
2694
2695 SendMsg2Socket(fd_progress,
2696 StringifyInt(cvmfs::directory_handles_->size()) + " handles\n");
2697 }
2698
2699 if (saved_states[i]->state_id == loader::kStateGlueBuffer) {
2700 SendMsg2Socket(fd_progress, "Migrating inode tracker (v1 to v4)... ");
2701 compat::inode_tracker::InodeTracker *saved_inode_tracker =
2702 (compat::inode_tracker::InodeTracker *)saved_states[i]->state;
2703 compat::inode_tracker::Migrate(
2704 saved_inode_tracker, cvmfs::mount_point_->inode_tracker());
2705 SendMsg2Socket(fd_progress, " done\n");
2706 }
2707
2708 if (saved_states[i]->state_id == loader::kStateGlueBufferV2) {
2709 SendMsg2Socket(fd_progress, "Migrating inode tracker (v2 to v4)... ");
2710 compat::inode_tracker_v2::InodeTracker *saved_inode_tracker =
2711 (compat::inode_tracker_v2::InodeTracker *)saved_states[i]->state;
2712 compat::inode_tracker_v2::Migrate(saved_inode_tracker,
2713 cvmfs::mount_point_->inode_tracker());
2714 SendMsg2Socket(fd_progress, " done\n");
2715 }
2716
2717 if (saved_states[i]->state_id == loader::kStateGlueBufferV3) {
2718 SendMsg2Socket(fd_progress, "Migrating inode tracker (v3 to v4)... ");
2719 compat::inode_tracker_v3::InodeTracker *saved_inode_tracker =
2720 (compat::inode_tracker_v3::InodeTracker *)saved_states[i]->state;
2721 compat::inode_tracker_v3::Migrate(saved_inode_tracker,
2722 cvmfs::mount_point_->inode_tracker());
2723 SendMsg2Socket(fd_progress, " done\n");
2724 }
2725
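// Restore pattern for the trackers and chunk tables below: the
// default-constructed object owned by the mount point is destroyed in place
// and the saved copy is copy-constructed into the same storage (placement
// new), so the MountPoint keeps referring to the same object address.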
2726 if (saved_states[i]->state_id == loader::kStateGlueBufferV4) {
2727 SendMsg2Socket(fd_progress, "Restoring inode tracker... ");
2728 cvmfs::mount_point_->inode_tracker()->~InodeTracker();
2729 glue::InodeTracker *saved_inode_tracker =
2730 (glue::InodeTracker *)saved_states[i]->state;
2731 new (cvmfs::mount_point_->inode_tracker())
2732 glue::InodeTracker(*saved_inode_tracker);
2733 SendMsg2Socket(fd_progress, " done\n");
2734 }
2735
2736 if (saved_states[i]->state_id == loader::kStateDentryTracker) {
2737 SendMsg2Socket(fd_progress, "Restoring dentry tracker... ");
2738 cvmfs::mount_point_->dentry_tracker()->~DentryTracker();
2739 glue::DentryTracker *saved_dentry_tracker =
2740 static_cast<glue::DentryTracker *>(saved_states[i]->state);
2741 new (cvmfs::mount_point_->dentry_tracker())
2742 glue::DentryTracker(*saved_dentry_tracker);
2743 SendMsg2Socket(fd_progress, " done\n");
2744 }
2745
2746 if (saved_states[i]->state_id == loader::kStatePageCacheTracker) {
2747 SendMsg2Socket(fd_progress, "Restoring page cache entry tracker... ");
2748 cvmfs::mount_point_->page_cache_tracker()->~PageCacheTracker();
2749 glue::PageCacheTracker *saved_page_cache_tracker =
2750 (glue::PageCacheTracker *)saved_states[i]->state;
2751 new (cvmfs::mount_point_->page_cache_tracker())
2752 glue::PageCacheTracker(*saved_page_cache_tracker);
2753 SendMsg2Socket(fd_progress, " done\n");
2754 }
2755
2756 ChunkTables *chunk_tables = cvmfs::mount_point_->chunk_tables();
2757
2758 if (saved_states[i]->state_id == loader::kStateOpenChunks) {
2759 SendMsg2Socket(fd_progress, "Migrating chunk tables (v1 to v4)... ");
2760 compat::chunk_tables::ChunkTables *saved_chunk_tables =
2761 (compat::chunk_tables::ChunkTables *)saved_states[i]->state;
2762 compat::chunk_tables::Migrate(saved_chunk_tables, chunk_tables);
2763 SendMsg2Socket(fd_progress,
2764 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2765 }
2766
2767 if (saved_states[i]->state_id == loader::kStateOpenChunksV2) {
2768 SendMsg2Socket(fd_progress, "Migrating chunk tables (v2 to v4)... ");
2769 compat::chunk_tables_v2::ChunkTables *saved_chunk_tables =
2770 (compat::chunk_tables_v2::ChunkTables *)saved_states[i]->state;
2771 compat::chunk_tables_v2::Migrate(saved_chunk_tables, chunk_tables);
2772 SendMsg2Socket(fd_progress,
2773 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2774 }
2775
2776 if (saved_states[i]->state_id == loader::kStateOpenChunksV3) {
2777 SendMsg2Socket(fd_progress, "Migrating chunk tables (v3 to v4)... ");
2778 compat::chunk_tables_v3::ChunkTables *saved_chunk_tables =
2779 (compat::chunk_tables_v3::ChunkTables *)saved_states[i]->state;
2780 compat::chunk_tables_v3::Migrate(saved_chunk_tables, chunk_tables);
2781 SendMsg2Socket(fd_progress,
2782 StringifyInt(chunk_tables->handle2fd.size()) + " handles\n");
2783 }
2784
2785 if (saved_states[i]->state_id == loader::kStateOpenChunksV4) {
2786 SendMsg2Socket(fd_progress, "Restoring chunk tables... ");
2787 chunk_tables->~ChunkTables();
2788 ChunkTables *saved_chunk_tables = reinterpret_cast<ChunkTables *>(
2789 saved_states[i]->state);
2790 new (chunk_tables) ChunkTables(*saved_chunk_tables);
2791 SendMsg2Socket(fd_progress, " done\n");
2792 }
2793
2794 if (saved_states[i]->state_id == loader::kStateInodeGeneration) {
2795 SendMsg2Socket(fd_progress, "Restoring inode generation... ");
2796 cvmfs::InodeGenerationInfo *old_info =
2797 (cvmfs::InodeGenerationInfo *)saved_states[i]->state;
2798 if (old_info->version == 1) {
2799 // Migration
2800 cvmfs::inode_generation_info_.initial_revision =
2801 old_info->initial_revision;
2802 cvmfs::inode_generation_info_.incarnation = old_info->incarnation;
2803 // Note: in the rare case that the inode generation was 0 before, inodes
2804 // can clash after reload and before remount
2805 } else {
2806 cvmfs::inode_generation_info_ = *old_info;
2807 }
2808 ++cvmfs::inode_generation_info_.incarnation;
2809 SendMsg2Socket(fd_progress, " done\n");
2810 }
2811
2812 if (saved_states[i]->state_id == loader::kStateOpenFilesCounter) {
2813 SendMsg2Socket(fd_progress, "Restoring open files counter... ");
2814 cvmfs::file_system_->no_open_files()->Set(*(reinterpret_cast<uint32_t *>(
2815 saved_states[i]->state)));
2816 SendMsg2Socket(fd_progress, " done\n");
2817 }
2818
2819 if (saved_states[i]->state_id == loader::kStateOpenFiles) {
2820 int old_root_fd = cvmfs::mount_point_->catalog_mgr()->root_fd();
2821
2822 // TODO(jblomer): make this less hacky
2823
2824 CacheManagerIds saved_type =
2825 cvmfs::file_system_->cache_mgr()->PeekState(saved_states[i]->state);
2826 int fixup_root_fd = -1;
2827
2828 if ((saved_type == kStreamingCacheManager) &&
2829 (cvmfs::file_system_->cache_mgr()->id() != kStreamingCacheManager))
2830 {
2831 // stick to the streaming cache manager
2832 StreamingCacheManager *new_cache_mgr = new
2833 StreamingCacheManager(cvmfs::max_open_files_,
2834 cvmfs::file_system_->cache_mgr(),
2835 cvmfs::mount_point_->download_mgr(),
2836 cvmfs::mount_point_->external_download_mgr());
2837 fixup_root_fd = new_cache_mgr->PlantFd(old_root_fd);
2838 cvmfs::file_system_->ReplaceCacheManager(new_cache_mgr);
2839 cvmfs::mount_point_->fetcher()->ReplaceCacheManager(new_cache_mgr);
2840 cvmfs::mount_point_->external_fetcher()->ReplaceCacheManager(
2841 new_cache_mgr);
2842 }
2843
2844 if ((cvmfs::file_system_->cache_mgr()->id() == kStreamingCacheManager) &&
2845 (saved_type != kStreamingCacheManager))
2846 {
2847 // stick to the cache manager wrapped into the streaming cache
2848 CacheManager *wrapped_cache_mgr = dynamic_cast<StreamingCacheManager *>(
2849 cvmfs::file_system_->cache_mgr())->MoveOutBackingCacheMgr(
2850 &fixup_root_fd);
2851 delete cvmfs::file_system_->cache_mgr();
2852 cvmfs::file_system_->ReplaceCacheManager(wrapped_cache_mgr);
2853 cvmfs::mount_point_->fetcher()->ReplaceCacheManager(wrapped_cache_mgr);
2854 cvmfs::mount_point_->external_fetcher()->ReplaceCacheManager(
2855 wrapped_cache_mgr);
2856 }
2857
2858 int new_root_fd = cvmfs::file_system_->cache_mgr()->RestoreState(
2859 fd_progress, saved_states[i]->state);
2860 LogCvmfs(kLogCvmfs, kLogDebug, "new root file catalog descriptor @%d",
2861 new_root_fd);
2862 if (new_root_fd >= 0) {
2863 cvmfs::file_system_->RemapCatalogFd(old_root_fd, new_root_fd);
2864 } else if (fixup_root_fd >= 0) {
2865 LogCvmfs(kLogCvmfs, kLogDebug,
2866 "new root file catalog descriptor (fixup) @%d", fixup_root_fd);
2867 cvmfs::file_system_->RemapCatalogFd(old_root_fd, fixup_root_fd);
2868 }
2869 }
2870
2871 if (saved_states[i]->state_id == loader::kStateFuse) {
2872 SendMsg2Socket(fd_progress, "Restoring fuse state... ");
2873 cvmfs::FuseState *fuse_state =
2874 static_cast<cvmfs::FuseState *>(saved_states[i]->state);
2875 if (!fuse_state->cache_symlinks)
2876 cvmfs::mount_point_->DisableCacheSymlinks();
2877 if (fuse_state->has_dentry_expire)
2878 cvmfs::mount_point_->EnableFuseExpireEntry();
2879 SendMsg2Socket(fd_progress, " done\n");
2880 }
2881 }
2882 if (cvmfs::mount_point_->inode_annotation()) {
2883 uint64_t saved_generation = cvmfs::inode_generation_info_.inode_generation;
2884 cvmfs::mount_point_->inode_annotation()->IncGeneration(saved_generation);
2885 }
2886
2887 return true;
2888 }
2889
2890
2891 static void FreeSavedState(const int fd_progress,
2892 const loader::StateList &saved_states)
2893 {
2894 for (unsigned i = 0, l = saved_states.size(); i < l; ++i) {
2895 switch (saved_states[i]->state_id) {
2896 case loader::kStateOpenDirs:
2897 SendMsg2Socket(fd_progress, "Releasing saved open directory handles\n");
2898 delete static_cast<cvmfs::DirectoryHandles *>(saved_states[i]->state);
2899 break;
2900 case loader::kStateGlueBuffer:
2901 SendMsg2Socket(
2902 fd_progress, "Releasing saved glue buffer (version 1)\n");
2903 delete static_cast<compat::inode_tracker::InodeTracker *>(
2904 saved_states[i]->state);
2905 break;
2906 case loader::kStateGlueBufferV2:
2907 SendMsg2Socket(
2908 fd_progress, "Releasing saved glue buffer (version 2)\n");
2909 delete static_cast<compat::inode_tracker_v2::InodeTracker *>(
2910 saved_states[i]->state);
2911 break;
2912 case loader::kStateGlueBufferV3:
2913 SendMsg2Socket(
2914 fd_progress, "Releasing saved glue buffer (version 3)\n");
2915 delete static_cast<compat::inode_tracker_v3::InodeTracker *>(
2916 saved_states[i]->state);
2917 break;
2918 case loader::kStateGlueBufferV4:
2919 SendMsg2Socket(fd_progress, "Releasing saved glue buffer\n");
2920 delete static_cast<glue::InodeTracker *>(saved_states[i]->state);
2921 break;
2922 case loader::kStateDentryTracker:
2923 SendMsg2Socket(fd_progress, "Releasing saved dentry tracker\n");
2924 delete static_cast<glue::DentryTracker *>(saved_states[i]->state);
2925 break;
2926 case loader::kStatePageCacheTracker:
2927 SendMsg2Socket(fd_progress, "Releasing saved page cache entry cache\n");
2928 delete static_cast<glue::PageCacheTracker *>(saved_states[i]->state);
2929 break;
2930 case loader::kStateOpenChunks:
2931 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 1)\n");
2932 delete static_cast<compat::chunk_tables::ChunkTables *>(
2933 saved_states[i]->state);
2934 break;
2935 case loader::kStateOpenChunksV2:
2936 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 2)\n");
2937 delete static_cast<compat::chunk_tables_v2::ChunkTables *>(
2938 saved_states[i]->state);
2939 break;
2940 case loader::kStateOpenChunksV3:
2941 SendMsg2Socket(fd_progress, "Releasing chunk tables (version 3)\n");
2942 delete static_cast<compat::chunk_tables_v3::ChunkTables *>(
2943 saved_states[i]->state);
2944 break;
2945 case loader::kStateOpenChunksV4:
2946 SendMsg2Socket(fd_progress, "Releasing chunk tables\n");
2947 delete static_cast<ChunkTables *>(saved_states[i]->state);
2948 break;
2949 case loader::kStateInodeGeneration:
2950 SendMsg2Socket(fd_progress, "Releasing saved inode generation info\n");
2951 delete static_cast<cvmfs::InodeGenerationInfo *>(
2952 saved_states[i]->state);
2953 break;
2954 case loader::kStateOpenFiles:
2955 cvmfs::file_system_->cache_mgr()->FreeState(
2956 fd_progress, saved_states[i]->state);
2957 break;
2958 case loader::kStateOpenFilesCounter:
2959 SendMsg2Socket(fd_progress, "Releasing open files counter\n");
2960 delete static_cast<uint32_t *>(saved_states[i]->state);
2961 break;
2962 case loader::kStateFuse:
2963 SendMsg2Socket(fd_progress, "Releasing fuse state\n");
2964 delete static_cast<cvmfs::FuseState *>(saved_states[i]->state);
2965 break;
2966 default:
2967 break;
2968 }
2969 }
2970 }
2971
2972
2973 static void __attribute__((constructor)) LibraryMain() {
2974 g_cvmfs_exports = new loader::CvmfsExports();
2975 g_cvmfs_exports->so_version = PACKAGE_VERSION;
2976 g_cvmfs_exports->fnAltProcessFlavor = AltProcessFlavor;
2977 g_cvmfs_exports->fnInit = Init;
2978 g_cvmfs_exports->fnSpawn = Spawn;
2979 g_cvmfs_exports->fnFini = Fini;
2980 g_cvmfs_exports->fnGetErrorMsg = GetErrorMsg;
2981 g_cvmfs_exports->fnMaintenanceMode = MaintenanceMode;
2982 g_cvmfs_exports->fnSaveState = SaveState;
2983 g_cvmfs_exports->fnRestoreState = RestoreState;
2984 g_cvmfs_exports->fnFreeSavedState = FreeSavedState;
2985 cvmfs::SetCvmfsOperations(&g_cvmfs_exports->cvmfs_operations);
2986 }
2987
2988
2989 static void __attribute__((destructor)) LibraryExit() {
2990 delete g_cvmfs_exports;
2991 g_cvmfs_exports = NULL;
2992 }
2993