/**
 * This file is part of the CernVM File System.
 *
 * This tool checks a cvmfs repository for file catalog errors.
 */

#define __STDC_FORMAT_MACROS


#include "swissknife_check.h"

#include <inttypes.h>
#include <unistd.h>

#include <cassert>
#include <map>
#include <queue>
#include <set>
#include <string>
#include <vector>

#include "catalog_sql.h"
#include "compression/compression.h"
#include "file_chunk.h"
#include "history_sqlite.h"
#include "manifest.h"
#include "network/download.h"
#include "reflog.h"
#include "sanitizer.h"
#include "shortstring.h"
#include "util/exception.h"
#include "util/logging.h"
#include "util/pointer.h"
#include "util/posix.h"

using namespace std;  // NOLINT

// for map of duplicate entries; as in kvstore.cc
static inline uint32_t hasher_any(const shash::Any &key) {
  // We'll just do the same thing as hasher_md5, since every hash is at
  // least as large.
  return *const_cast<uint32_t *>(reinterpret_cast<const uint32_t *>(key.digest)
                                 + 1);
}


namespace swissknife {

CommandCheck::CommandCheck()
    : check_chunks_(false), no_duplicates_map_(false), is_remote_(false) {
  const shash::Any hash_null;
  duplicates_map_.Init(16, hash_null, hasher_any);
}
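
/**
 * Compares two directory entries field by field and logs any difference
 * found.  At nested catalog transition points the catalog management flags
 * are expected to differ, and uid/gid mismatches are not reported there.
 */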
bool CommandCheck::CompareEntries(const catalog::DirectoryEntry &a,
                                  const catalog::DirectoryEntry &b,
                                  const bool compare_names,
                                  const bool is_transition_point) {
  typedef catalog::DirectoryEntry::Difference Difference;

  const catalog::DirectoryEntry::Differences diffs = a.CompareTo(b);
  if (diffs == Difference::kIdentical) {
    return true;
  }

  // in case of a nested catalog transition point the controlling flags are
  // supposed to differ. If this is the only difference we are done...
  if (is_transition_point
      && (diffs ^ Difference::kNestedCatalogTransitionFlags) == 0) {
    return true;
  }

  bool retval = true;
  if (compare_names) {
    if (diffs & Difference::kName) {
      LogCvmfs(kLogCvmfs, kLogStderr, "names differ: %s / %s", a.name().c_str(),
               b.name().c_str());
      retval = false;
    }
  }
  if (diffs & Difference::kLinkcount) {
    LogCvmfs(kLogCvmfs, kLogStderr, "linkcounts differ: %u / %u", a.linkcount(),
             b.linkcount());
    retval = false;
  }
  if (diffs & Difference::kHardlinkGroup) {
    LogCvmfs(kLogCvmfs, kLogStderr, "hardlink groups differ: %u / %u",
             a.hardlink_group(), b.hardlink_group());
    retval = false;
  }
  if (diffs & Difference::kSize) {
    LogCvmfs(kLogCvmfs, kLogStderr, "sizes differ: %" PRIu64 " / %" PRIu64,
             a.size(), b.size());
    retval = false;
  }
  if (diffs & Difference::kMode) {
    LogCvmfs(kLogCvmfs, kLogStderr, "modes differ: %u / %u", a.mode(),
             b.mode());
    retval = false;
  }
  if (diffs & Difference::kMtime) {
    LogCvmfs(kLogCvmfs, kLogStderr, "timestamps differ: %lu / %lu", a.mtime(),
             b.mtime());
    retval = false;
  }
  if (diffs & Difference::kChecksum) {
    LogCvmfs(kLogCvmfs, kLogStderr, "content hashes differ: %s / %s",
             a.checksum().ToString().c_str(), b.checksum().ToString().c_str());
    retval = false;
  }
  if (diffs & Difference::kSymlink) {
    LogCvmfs(kLogCvmfs, kLogStderr, "symlinks differ: %s / %s",
             a.symlink().c_str(), b.symlink().c_str());
    retval = false;
  }
  if (diffs & Difference::kExternalFileFlag) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "external file flag differs: %d / %d "
             "(%s / %s)",
             a.IsExternalFile(), b.IsExternalFile(), a.name().c_str(),
             b.name().c_str());
    retval = false;
  }
  if (diffs & Difference::kHasXattrsFlag) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "extended attributes differ: %d / %d "
             "(%s / %s)",
             a.HasXattrs(), b.HasXattrs(), a.name().c_str(), b.name().c_str());
    retval = false;
  }
  if (!is_transition_point) {
    if (diffs & Difference::kUid) {
      LogCvmfs(kLogCvmfs, kLogStderr, "uids differ: %d / %d (%s / %s)", a.uid(),
               b.uid(), a.name().c_str(), b.name().c_str());
      retval = false;
    }
    if (diffs & Difference::kGid) {
      LogCvmfs(kLogCvmfs, kLogStderr, "gids differ: %d / %d (%s / %s)", a.gid(),
               b.gid(), a.name().c_str(), b.name().c_str());
      retval = false;
    }
  }

  return retval;
}
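

/**
 * Compares all statistics counter fields of two catalogs and logs every
 * mismatch.  Returns true only if all fields agree.
 */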
bool CommandCheck::CompareCounters(const catalog::Counters &a,
                                   const catalog::Counters &b) {
  const catalog::Counters::FieldsMap map_a = a.GetFieldsMap();
  const catalog::Counters::FieldsMap map_b = b.GetFieldsMap();

  bool retval = true;
  catalog::Counters::FieldsMap::const_iterator i = map_a.begin();
  const catalog::Counters::FieldsMap::const_iterator iend = map_a.end();
  for (; i != iend; ++i) {
    const catalog::Counters::FieldsMap::const_iterator comp = map_b.find(
        i->first);
    assert(comp != map_b.end());

    if (*(i->second) != *(comp->second)) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "catalog statistics mismatch: %s (expected: %" PRIu64 " / "
               "in catalog: %" PRIu64 ")",
               comp->first.c_str(), *(i->second), *(comp->second));
      retval = false;
    }
  }

  return retval;
}


/**
 * Checks for existence of a file either locally or via HTTP head
 */
bool CommandCheck::Exists(const string &file) {
  if (!is_remote_) {
    return FileExists(file) || SymlinkExists(file);
  } else {
    const string url = repo_base_path_ + "/" + file;
    LogCvmfs(kLogCvmfs, kLogVerboseMsg, "[Exists::url] %s", url.c_str());
    download::JobInfo head(&url, false);
    return download_manager()->Fetch(&head) == download::kFailOk;
  }
}


/**
 * Copies a file from the repository into a temporary file.
 */
string CommandCheck::FetchPath(const string &path) {
  string tmp_path;
  FILE *f = CreateTempFile(temp_directory_ + "/cvmfstmp", kDefaultFileMode,
                           "w+", &tmp_path);
  assert(f != NULL);

  const string url = repo_base_path_ + "/" + path;
  if (is_remote_) {
    cvmfs::FileSink filesink(f);
    download::JobInfo download_job(&url, false, false, NULL, &filesink);
    const download::Failures retval = download_manager()->Fetch(&download_job);
    if (retval != download::kFailOk) {
      PANIC(kLogStderr, "failed to read %s", url.c_str());
    }
  } else {
    const bool retval = CopyPath2File(url, f);
    if (!retval) {
      PANIC(kLogStderr, "failed to read %s", url.c_str());
    }
  }

  fclose(f);
  return tmp_path;
}


/**
 * Verifies reflog checksum and looks for presence of the entry points
 * referenced in the manifest.
 */
bool CommandCheck::InspectReflog(const shash::Any &reflog_hash,
                                 manifest::Manifest *manifest) {
  LogCvmfs(kLogCvmfs, kLogStdout, "Inspecting log of references");
  const string reflog_path = FetchPath(".cvmfsreflog");
  shash::Any computed_hash(reflog_hash.algorithm);
  manifest::Reflog::HashDatabase(reflog_path, &computed_hash);
  if (computed_hash != reflog_hash) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "The .cvmfsreflog has unexpected content hash %s (expected %s)",
             computed_hash.ToString().c_str(), reflog_hash.ToString().c_str());
    unlink(reflog_path.c_str());
    return false;
  }

  const UniquePtr<manifest::Reflog> reflog(manifest::Reflog::Open(reflog_path));
  assert(reflog.IsValid());
  reflog->TakeDatabaseFileOwnership();

  if (!reflog->ContainsCatalog(manifest->catalog_hash())) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to find catalog root hash %s in .cvmfsreflog",
             manifest->catalog_hash().ToString().c_str());
    return false;
  }

  if (!reflog->ContainsCertificate(manifest->certificate())) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to find certificate hash %s in .cvmfsreflog",
             manifest->certificate().ToString().c_str());
    return false;
  }

  if (!manifest->history().IsNull()
      && !reflog->ContainsHistory(manifest->history())) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to find tag database's hash %s in .cvmfsreflog",
             manifest->history().ToString().c_str());
    return false;
  }

  if (!manifest->meta_info().IsNull()
      && !reflog->ContainsMetainfo(manifest->meta_info())) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to find meta info hash %s in .cvmfsreflog",
             manifest->meta_info().ToString().c_str());
    return false;
  }

  return true;
}


/**
 * Verifies the logical consistency of the tag database.
 */
bool CommandCheck::InspectHistory(history::History *history) {
  LogCvmfs(kLogCvmfs, kLogStdout, "Inspecting tag database");
  bool retval;
  vector<history::History::Tag> tags;
  retval = history->List(&tags);
  if (!retval) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to enumerate tags");
    return false;
  }
  vector<history::History::Branch> branches;
  retval = history->ListBranches(&branches);
  if (!retval) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to enumerate branches");
    return false;
  }

  bool result = true;

  map<string, uint64_t> initial_revisions;
  const sanitizer::BranchSanitizer sanitizer;
  for (unsigned i = 0; i < branches.size(); ++i) {
    if (!sanitizer.IsValid(branches[i].branch)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "invalid branch name: %s",
               branches[i].branch.c_str());
      result = false;
    }
    initial_revisions[branches[i].branch] = branches[i].initial_revision;
  }

  set<string> used_branches;  // all branches referenced in tag db
  // TODO(jblomer): same root hash implies same size and revision
  for (unsigned i = 0; i < tags.size(); ++i) {
    used_branches.insert(tags[i].branch);
    const map<string, uint64_t>::const_iterator iter = initial_revisions.find(
        tags[i].branch);
    if (iter == initial_revisions.end()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "invalid branch %s in tag %s",
               tags[i].branch.c_str(), tags[i].name.c_str());
      result = false;
    } else {
      if (tags[i].revision < iter->second) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "invalid revision %" PRIu64 " of tag %s", tags[i].revision,
                 tags[i].name.c_str());
        result = false;
      }
    }
  }

  if (used_branches.size() != branches.size()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "unused, dangling branches stored");
    result = false;
  }

  return result;
}

/**
 * Recursive catalog walk-through
 *
 * TODO(vavolkl): This method is large and does a lot of checks
 * that could be split into smaller ones.
 *
 */
bool CommandCheck::Find(const catalog::Catalog *catalog,
                        const PathString &path,
                        catalog::DeltaCounters *computed_counters,
                        set<PathString> *bind_mountpoints) {
  catalog::DirectoryEntryList entries;
  catalog::DirectoryEntry this_directory;

  if (!catalog->LookupPath(path, &this_directory)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to lookup %s", path.c_str());
    return false;
  }
  if (!catalog->ListingPath(path, &entries)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to list %s", path.c_str());
    return false;
  }

  uint32_t num_subdirs = 0;
  bool retval = true;
  typedef map<uint32_t, vector<catalog::DirectoryEntry> > HardlinkMap;
  HardlinkMap hardlinks;
  bool found_nested_marker = false;

  for (unsigned i = 0; i < entries.size(); ++i) {
    // for performance reasons, keep track of files already checked
    // and only run requests once per hash
    const bool entry_needs_check = !entries[i].checksum().IsNull()
                                   && !entries[i].IsExternalFile() &&
                                   // fallback cli option can force the entry to
                                   // be checked
                                   (no_duplicates_map_
                                    || !duplicates_map_.Contains(
                                        entries[i].checksum()));
    if (entry_needs_check && !no_duplicates_map_)
      duplicates_map_.Insert(entries[i].checksum(), 1);

    PathString full_path(path);
    full_path.Append("/", 1);
    full_path.Append(entries[i].name().GetChars(),
                     entries[i].name().GetLength());
    LogCvmfs(kLogCvmfs, kLogVerboseMsg, "[path] %s [needs check] %i",
             full_path.c_str(), entry_needs_check);


    // Name must not be empty
    if (entries[i].name().IsEmpty()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "empty path at %s", full_path.c_str());
      retval = false;
    }

    // Catalog markers should indicate nested catalogs
    if (entries[i].name() == NameString(string(".cvmfscatalog"))) {
      if (catalog->mountpoint() != path) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "found abandoned nested catalog marker at %s",
                 full_path.c_str());
        retval = false;
      }
      found_nested_marker = true;
    }

    // Check if checksum is not null
    if (entries[i].IsRegular() && !entries[i].IsChunkedFile()
        && entries[i].checksum().IsNull()) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "regular file pointing to zero-hash: '%s'", full_path.c_str());
      retval = false;
    }

    // Check if the chunk is there
    if (check_chunks_ && entry_needs_check) {
      string chunk_path = "data/" + entries[i].checksum().MakePath();
      if (entries[i].IsDirectory())
        chunk_path += shash::kSuffixMicroCatalog;
      if (!Exists(chunk_path)) {
        LogCvmfs(kLogCvmfs, kLogStderr, "data chunk %s (%s) missing",
                 entries[i].checksum().ToString().c_str(), full_path.c_str());
        retval = false;
      }
    }

    // Add hardlinks to counting map
    if ((entries[i].linkcount() > 1) && !entries[i].IsDirectory()) {
      if (entries[i].hardlink_group() == 0) {
        LogCvmfs(kLogCvmfs, kLogStderr, "invalid hardlink group for %s",
                 full_path.c_str());
        retval = false;
      } else {
        const HardlinkMap::iterator hardlink_group = hardlinks.find(
            entries[i].hardlink_group());
        if (hardlink_group == hardlinks.end()) {
          hardlinks[entries[i].hardlink_group()];
          hardlinks[entries[i].hardlink_group()].push_back(entries[i]);
        } else {
          if (!CompareEntries(entries[i], (hardlink_group->second)[0], false)) {
            LogCvmfs(kLogCvmfs, kLogStderr, "hardlink %s doesn't match",
                     full_path.c_str());
            retval = false;
          }
          hardlink_group->second.push_back(entries[i]);
        }  // Hardlink added to map
      }  // Hardlink group > 0
    }  // Hardlink found

    // For any kind of entry, the linkcount should be > 0
    if (entries[i].linkcount() == 0) {
      LogCvmfs(kLogCvmfs, kLogStderr, "Entry %s has linkcount 0.",
               entries[i].name().c_str());
      retval = false;
    }

    // Checks depending on entry type
    if (!entries[i].IsRegular()) {
      if (entries[i].IsDirectIo()) {
        LogCvmfs(kLogCvmfs, kLogStderr, "invalid direct i/o flag found: %s",
                 full_path.c_str());
        retval = false;
      }
    }
    if (entries[i].IsDirectory()) {
      computed_counters->self.directories++;
      num_subdirs++;
      // Directory size
      // if (entries[i].size() < 4096) {
      //   LogCvmfs(kLogCvmfs, kLogStderr, "invalid file size for %s",
      //            full_path.c_str());
      //   retval = false;
      // }
      // No directory hardlinks
      if (entries[i].hardlink_group() != 0) {
        LogCvmfs(kLogCvmfs, kLogStderr, "directory hardlink found at %s",
                 full_path.c_str());
        retval = false;
      }
      if (entries[i].IsNestedCatalogMountpoint()
          || entries[i].IsBindMountpoint()) {
        // Find transition point
        if (entries[i].IsNestedCatalogMountpoint())
          computed_counters->self.nested_catalogs++;
        shash::Any tmp;
        uint64_t tmp2;
        const PathString mountpoint(full_path);
        if (!catalog->FindNested(mountpoint, &tmp, &tmp2)) {
          LogCvmfs(kLogCvmfs, kLogStderr, "nested catalog at %s not registered",
                   full_path.c_str());
          retval = false;
        }

        // check that the nested mountpoint is empty in the current catalog
        catalog::DirectoryEntryList nested_entries;
        if (catalog->ListingPath(full_path, &nested_entries)
            && !nested_entries.empty()) {
          LogCvmfs(kLogCvmfs, kLogStderr,
                   "non-empty nested catalog mountpoint "
                   "at %s.",
                   full_path.c_str());
          retval = false;
        }

        if (entries[i].IsBindMountpoint()) {
          bind_mountpoints->insert(full_path);
          if (entries[i].IsNestedCatalogMountpoint()) {
            LogCvmfs(kLogCvmfs, kLogStderr,
                     "bind mountpoint and nested mountpoint mutually exclusive"
                     " at %s.",
                     full_path.c_str());
            retval = false;
          }
        }
      } else {
        // Recurse
        if (!Find(catalog, full_path, computed_counters, bind_mountpoints))
          retval = false;
      }
    } else if (entries[i].IsLink()) {
      computed_counters->self.symlinks++;
      // No hash for symbolic links
      if (!entries[i].checksum().IsNull()) {
        LogCvmfs(kLogCvmfs, kLogStderr, "symbolic links with hash at %s",
                 full_path.c_str());
        retval = false;
      }
      // Right size of symbolic link?
      if (entries[i].size() != entries[i].symlink().GetLength()) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "wrong symbolic link size for %s; "
                 "expected %u, got %lu",
                 full_path.c_str(), entries[i].symlink().GetLength(),
                 entries[i].size());
        retval = false;
      }
    } else if (entries[i].IsRegular()) {
      computed_counters->self.regular_files++;
      computed_counters->self.file_size += entries[i].size();
    } else if (entries[i].IsSpecial()) {
      computed_counters->self.specials++;
      // Size zero for special files
      if (entries[i].size() != 0) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "unexpected non-zero special file size %s", full_path.c_str());
        retval = false;
      }
      // No hash for special files
      if (!entries[i].checksum().IsNull()) {
        LogCvmfs(kLogCvmfs, kLogStderr, "special file with hash at %s",
                 full_path.c_str());
        retval = false;
      }
      // No symlink
      if (entries[i].symlink().GetLength() > 0) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "special file with non-zero symlink at %s", full_path.c_str());
        retval = false;
      }
    } else {
      LogCvmfs(kLogCvmfs, kLogStderr, "unknown file type %s",
               full_path.c_str());
      retval = false;
    }

    if (entries[i].HasXattrs()) {
      computed_counters->self.xattrs++;
    }

    if (entries[i].IsExternalFile()) {
      computed_counters->self.externals++;
      computed_counters->self.external_file_size += entries[i].size();
      if (!entries[i].IsRegular()) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "only regular files can be external: %s", full_path.c_str());
        retval = false;
      }
    }

    // checking file chunk integrity
    if (entries[i].IsChunkedFile()) {
      FileChunkList chunks;
      catalog->ListPathChunks(full_path, entries[i].hash_algorithm(), &chunks);

      computed_counters->self.chunked_files++;
      computed_counters->self.chunked_file_size += entries[i].size();
      computed_counters->self.file_chunks += chunks.size();

      // do we find file chunks for the chunked file in this catalog?
      if (chunks.size() == 0) {
        LogCvmfs(kLogCvmfs, kLogStderr, "no file chunks found for big file %s",
                 full_path.c_str());
        retval = false;
      }

      size_t aggregated_file_size = 0;
      off_t next_offset = 0;

      for (unsigned j = 0; j < chunks.size(); ++j) {
        const FileChunk this_chunk = chunks.At(j);
        // check if the chunk boundaries fit together...
        if (next_offset != this_chunk.offset()) {
          LogCvmfs(kLogCvmfs, kLogStderr, "misaligned chunk offsets for %s",
                   full_path.c_str());
          retval = false;
        }
        next_offset = this_chunk.offset() + this_chunk.size();
        aggregated_file_size += this_chunk.size();

        // are all data chunks in the data store?
        if (check_chunks_ && !entries[i].IsExternalFile()) {
          const shash::Any &chunk_hash = this_chunk.content_hash();
          // for performance reasons, only perform the check once
          // and skip if the hash has been checked before
          bool chunk_needs_check = true;
          if (!no_duplicates_map_ && !duplicates_map_.Contains(chunk_hash)) {
            duplicates_map_.Insert(chunk_hash, 1);
          } else if (!no_duplicates_map_) {
            chunk_needs_check = false;
          }
          if (chunk_needs_check) {
            const string chunk_path = "data/" + chunk_hash.MakePath();
            if (!Exists(chunk_path)) {
              LogCvmfs(kLogCvmfs, kLogStderr,
                       "partial data chunk %s (%s -> "
                       "offset: %ld | size: %lu) missing",
                       this_chunk.content_hash().ToStringWithSuffix().c_str(),
                       full_path.c_str(), this_chunk.offset(),
                       this_chunk.size());
              retval = false;
            }
          }
        }
      }

      // is the aggregated chunk size equal to the actual file size?
      if (aggregated_file_size != entries[i].size()) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "chunks of file %s produce a size "
                 "mismatch. Calculated %zu bytes | %lu "
                 "bytes expected",
                 full_path.c_str(), aggregated_file_size, entries[i].size());
        retval = false;
      }
    }
  }  // Loop through entries

  // Check if nested catalog marker has been found
  if (!path.IsEmpty() && (path == catalog->mountpoint())
      && !found_nested_marker) {
    LogCvmfs(kLogCvmfs, kLogStderr, "nested catalog without marker at %s",
             path.c_str());
    retval = false;
  }

  // Check directory linkcount
  if (this_directory.linkcount() != num_subdirs + 2) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "wrong linkcount for %s; "
             "expected %u, got %u",
             path.c_str(), num_subdirs + 2, this_directory.linkcount());
    retval = false;
  }

  // Check hardlink linkcounts
  for (HardlinkMap::const_iterator i = hardlinks.begin(),
                                   iEnd = hardlinks.end();
       i != iEnd;
       ++i) {
    if (i->second[0].linkcount() != i->second.size()) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "hardlink linkcount wrong for %s, "
               "expected %lu, got %u",
               (path.ToString() + "/" + i->second[0].name().ToString()).c_str(),
               i->second.size(), i->second[0].linkcount());
      retval = false;
    }
  }

  return retval;
}
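

/**
 * Fetches the object with the given hash from the remote repository into
 * the temporary directory.  Returns the path of the local copy or the
 * empty string on failure.
 */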
string CommandCheck::DownloadPiece(const shash::Any catalog_hash) {
  const string source = "data/" + catalog_hash.MakePath();
  const string dest = temp_directory_ + "/" + catalog_hash.ToString();
  const string url = repo_base_path_ + "/" + source;

  cvmfs::PathSink pathsink(dest);
  download::JobInfo download_catalog(&url, true, false, &catalog_hash,
                                     &pathsink);
  const download::Failures retval = download_manager()->Fetch(
      &download_catalog);
  if (retval != download::kFailOk) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to download object %s (%d)",
             catalog_hash.ToString().c_str(), retval);
    return "";
  }

  return dest;
}
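

/**
 * Decompresses the object with the given hash from the local repository
 * into the temporary directory.  Returns the path of the decompressed copy
 * or the empty string on failure.
 */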
string CommandCheck::DecompressPiece(const shash::Any catalog_hash) {
  const string source = "data/" + catalog_hash.MakePath();
  const string dest = temp_directory_ + "/" + catalog_hash.ToString();
  if (!zlib::DecompressPath2Path(source, dest))
    return "";

  return dest;
}
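

/**
 * Loads the catalog with the given hash, either decompressed from the local
 * repository or downloaded from the remote one, and attaches it.  If a
 * non-zero catalog_size is given, the size of the catalog file is verified
 * against it.
 */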
catalog::Catalog *CommandCheck::FetchCatalog(const string &path,
                                             const shash::Any &catalog_hash,
                                             const uint64_t catalog_size) {
  string tmp_file;
  if (!is_remote_)
    tmp_file = DecompressPiece(catalog_hash);
  else
    tmp_file = DownloadPiece(catalog_hash);

  if (tmp_file == "") {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to load catalog %s",
             catalog_hash.ToString().c_str());
    return NULL;
  }

  catalog::Catalog *catalog = catalog::Catalog::AttachFreely(path, tmp_file,
                                                             catalog_hash);
  const int64_t catalog_file_size = GetFileSize(tmp_file);
  if (catalog_file_size <= 0) {
    LogCvmfs(kLogCvmfs, kLogStderr, "Error downloading catalog %s at %s %s",
             catalog_hash.ToString().c_str(), path.c_str(), tmp_file.c_str());
    assert(catalog_file_size > 0);
  }
  unlink(tmp_file.c_str());

  if ((catalog_size > 0) && (uint64_t(catalog_file_size) != catalog_size)) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "catalog file size mismatch, "
             "expected %" PRIu64 ", got %" PRIu64,
             catalog_size, catalog_file_size);
    delete catalog;
    return NULL;
  }

  return catalog;
}
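

/**
 * Walks down from the root catalog along subtree_path and resolves hash and
 * size of the nested catalog mounted at that path.
 */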
bool CommandCheck::FindSubtreeRootCatalog(const string &subtree_path,
                                          shash::Any *root_hash,
                                          uint64_t *root_size) {
  catalog::Catalog *current_catalog = FetchCatalog("", *root_hash);
  if (current_catalog == NULL) {
    return false;
  }

  typedef vector<string> Tokens;
  const Tokens path_tokens = SplitString(subtree_path, '/');

  string current_path = "";

  Tokens::const_iterator i = path_tokens.begin();
  const Tokens::const_iterator iend = path_tokens.end();
  for (; i != iend; ++i) {
    if (i->empty()) {
      continue;
    }

    current_path += "/" + *i;
    if (current_catalog->FindNested(PathString(current_path), root_hash,
                                    root_size)) {
      delete current_catalog;

      if (current_path.length() < subtree_path.length()) {
        current_catalog = FetchCatalog(current_path, *root_hash);
        if (current_catalog == NULL) {
          break;
        }
      } else {
        return true;
      }
    }
  }
  return false;
}

/**
 * Recursion on nested catalog level. No ownership of computed_counters.
 */
bool CommandCheck::InspectTree(const string &path,
                               const shash::Any &catalog_hash,
                               const uint64_t catalog_size,
                               const bool is_nested_catalog,
                               const catalog::DirectoryEntry *transition_point,
                               catalog::DeltaCounters *computed_counters) {
  LogCvmfs(kLogCvmfs, kLogStdout | kLogInform, "[inspecting catalog] %s at %s",
           catalog_hash.ToString().c_str(), path == "" ? "/" : path.c_str());

  const catalog::Catalog *catalog = FetchCatalog(path, catalog_hash,
                                                 catalog_size);
  if (catalog == NULL) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to open catalog %s",
             catalog_hash.ToString().c_str());
    return false;
  }

  int retval = true;

  if (catalog->root_prefix() != PathString(path.data(), path.length())) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "root prefix mismatch; "
             "expected %s, got %s",
             path.c_str(), catalog->root_prefix().c_str());
    retval = false;
  }

  // Check transition point
  catalog::DirectoryEntry root_entry;
  if (!catalog->LookupPath(catalog->root_prefix(), &root_entry)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to lookup root entry (%s)",
             path.c_str());
    retval = false;
  }
  if (!root_entry.IsDirectory()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "root entry not a directory (%s)",
             path.c_str());
    retval = false;
  }
  if (is_nested_catalog) {
    if (transition_point != NULL
        && !CompareEntries(*transition_point, root_entry, true, true)) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "transition point and root entry differ (%s)", path.c_str());
      retval = false;
    }
    if (!root_entry.IsNestedCatalogRoot()) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "nested catalog root expected but not found (%s)", path.c_str());
      retval = false;
    }
  } else {
    if (root_entry.IsNestedCatalogRoot()) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "nested catalog root found but not expected (%s)", path.c_str());
      retval = false;
    }
  }

  // Traverse the catalog
  set<PathString> bind_mountpoints;
  if (!Find(catalog, PathString(path.data(), path.length()), computed_counters,
            &bind_mountpoints)) {
    retval = false;
  }

  // Check number of entries
  if (root_entry.HasXattrs())
    computed_counters->self.xattrs++;
  const uint64_t num_found_entries = 1 + computed_counters->self.regular_files
                                     + computed_counters->self.symlinks
                                     + computed_counters->self.specials
                                     + computed_counters->self.directories;
  if (num_found_entries != catalog->GetNumEntries()) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "dangling entries in catalog, "
             "expected %" PRIu64 ", got %" PRIu64,
             catalog->GetNumEntries(), num_found_entries);
    retval = false;
  }

  // Recurse into nested catalogs
  const catalog::Catalog::NestedCatalogList
      &nested_catalogs = catalog->ListNestedCatalogs();
  const catalog::Catalog::NestedCatalogList
      own_nested_catalogs = catalog->ListOwnNestedCatalogs();
  if (own_nested_catalogs.size()
      != static_cast<uint64_t>(computed_counters->self.nested_catalogs)) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "number of nested catalogs does not match;"
             " expected %lu, got %lu",
             computed_counters->self.nested_catalogs,
             own_nested_catalogs.size());
    retval = false;
  }
  set<PathString> nested_catalog_paths;
  for (catalog::Catalog::NestedCatalogList::const_iterator
           i = nested_catalogs.begin(),
           iEnd = nested_catalogs.end();
       i != iEnd;
       ++i) {
    nested_catalog_paths.insert(i->mountpoint);
  }
  if (nested_catalog_paths.size() != nested_catalogs.size()) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "duplicates among nested catalogs and bind mountpoints");
    retval = false;
  }

  for (catalog::Catalog::NestedCatalogList::const_iterator
           i = nested_catalogs.begin(),
           iEnd = nested_catalogs.end();
       i != iEnd;
       ++i) {
    if (bind_mountpoints.find(i->mountpoint) != bind_mountpoints.end()) {
      catalog::DirectoryEntry bind_mountpoint;
      const PathString mountpoint("/" + i->mountpoint.ToString().substr(1));
      if (!catalog->LookupPath(mountpoint, &bind_mountpoint)) {
        LogCvmfs(kLogCvmfs, kLogStderr, "failed to lookup bind mountpoint %s",
                 mountpoint.c_str());
        retval = false;
      }
      LogCvmfs(kLogCvmfs, kLogDebug, "skipping bind mountpoint %s",
               i->mountpoint.c_str());
      continue;
    }
    catalog::DirectoryEntry nested_transition_point;
    if (!catalog->LookupPath(i->mountpoint, &nested_transition_point)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to lookup transition point %s",
               i->mountpoint.c_str());
      retval = false;
    } else {
      catalog::DeltaCounters nested_counters;
      const bool is_nested = true;
      if (!InspectTree(i->mountpoint.ToString(), i->hash, i->size, is_nested,
                       &nested_transition_point, &nested_counters))
        retval = false;
      nested_counters.PopulateToParent(computed_counters);
    }
  }

  // Check statistics counters
  // Additionally account for root directory
  computed_counters->self.directories++;
  catalog::Counters compare_counters;
  compare_counters.ApplyDelta(*computed_counters);
  const catalog::Counters stored_counters = catalog->GetCounters();
  if (!CompareCounters(compare_counters, stored_counters)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "statistics counter mismatch [%s]",
             catalog_hash.ToString().c_str());
    retval = false;
  }

  delete catalog;
  return retval;
}
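

/**
 * Entry point of the check command: parses the command line arguments,
 * loads the manifest, optionally verifies reflog, tag database, and
 * alternative catalog links, and finally inspects the catalog (sub)tree.
 */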
int CommandCheck::Main(const swissknife::ArgumentList &args) {
  string tag_name;
  string subtree_path = "";
  string pubkey_path = "";
  string repo_name = "";
  string reflog_chksum_path = "";

  temp_directory_ = (args.find('t') != args.end()) ? *args.find('t')->second
                                                   : "/tmp";
  if (args.find('n') != args.end())
    tag_name = *args.find('n')->second;
  if (args.find('c') != args.end())
    check_chunks_ = true;
  if (args.find('d') != args.end())
    no_duplicates_map_ = true;
  if (args.find('l') != args.end()) {
    const unsigned log_level = kLogLevel0
                               << String2Uint64(*args.find('l')->second);
    if (log_level > kLogNone) {
      LogCvmfs(kLogCvmfs, kLogStderr, "invalid log level");
      return 1;
    }
    SetLogVerbosity(static_cast<LogLevels>(log_level));
  }
  if (args.find('k') != args.end())
    pubkey_path = *args.find('k')->second;
  if (DirectoryExists(pubkey_path))
    pubkey_path = JoinStrings(FindFilesBySuffix(pubkey_path, ".pub"), ":");
  if (args.find('N') != args.end())
    repo_name = *args.find('N')->second;

  repo_base_path_ = MakeCanonicalPath(*args.find('r')->second);
  if (args.find('s') != args.end())
    subtree_path = MakeCanonicalPath(*args.find('s')->second);
  if (args.find('R') != args.end())
    reflog_chksum_path = *args.find('R')->second;

  // Repository can be HTTP address or on local file system
  is_remote_ = IsHttpUrl(repo_base_path_);

  // initialize the (swissknife global) download and signature managers
  if (is_remote_) {
    const bool follow_redirects = (args.count('L') > 0);
    const string proxy = (args.count('@') > 0) ? *args.find('@')->second : "";
    if (!this->InitDownloadManager(follow_redirects, proxy)) {
      return 1;
    }

    if (pubkey_path.empty() || repo_name.empty()) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "please provide pubkey and repo name for "
               "remote repositories");
      return 1;
    }

    if (!this->InitSignatureManager(pubkey_path)) {
      return 1;
    }
  }

  // Load Manifest
  UniquePtr<manifest::Manifest> manifest;
  bool successful = true;

  if (is_remote_) {
    manifest = FetchRemoteManifest(repo_base_path_, repo_name);
  } else {
    if (chdir(repo_base_path_.c_str()) != 0) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to switch to directory %s",
               repo_base_path_.c_str());
      return 1;
    }
    manifest = OpenLocalManifest(".cvmfspublished");
  }

  if (!manifest.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to load repository manifest");
    return 1;
  }

  // Check meta-info object
  if (!manifest->meta_info().IsNull()) {
    string tmp_file;
    if (!is_remote_)
      tmp_file = DecompressPiece(manifest->meta_info());
    else
      tmp_file = DownloadPiece(manifest->meta_info());
    if (tmp_file == "") {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to load repository metainfo %s",
               manifest->meta_info().ToString().c_str());
      return 1;
    }
    unlink(tmp_file.c_str());
  }

  shash::Any reflog_hash;
  if (!reflog_chksum_path.empty()) {
    if (!manifest::Reflog::ReadChecksum(reflog_chksum_path, &reflog_hash)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to read reflog checksum file");
      return 1;
    }
  } else {
    reflog_hash = manifest->reflog_hash();
  }

  if (Exists(".cvmfsreflog")) {
    if (reflog_hash.IsNull()) {
      // If there is a reflog, we want to check it
      LogCvmfs(kLogCvmfs, kLogStderr,
               ".cvmfsreflog present but no checksum provided, aborting");
      return 1;
    }
    const bool retval = InspectReflog(reflog_hash, manifest.weak_ref());
    if (!retval) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to verify reflog");
      return 1;
    }
  } else {
    if (!reflog_hash.IsNull()) {
      // There is a checksum but no reflog; possibly the checksum is the one
      // from the manifest for the stratum 0 reflog
      if (!reflog_chksum_path.empty()) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "local reflog checksum set but reflog itself is missing, "
                 "aborting");
        return 1;
      }
    }
  }

  // Load history
  UniquePtr<history::History> tag_db;
  if (!manifest->history().IsNull()) {
    string tmp_file;
    if (!is_remote_)
      tmp_file = DecompressPiece(manifest->history());
    else
      tmp_file = DownloadPiece(manifest->history());
    if (tmp_file == "") {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to load history database %s",
               manifest->history().ToString().c_str());
      return 1;
    }
    tag_db = history::SqliteHistory::Open(tmp_file);
    if (!tag_db.IsValid()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to open history database %s",
               manifest->history().ToString().c_str());
      return 1;
    }
    tag_db->TakeDatabaseFileOwnership();
    successful = InspectHistory(tag_db.weak_ref()) && successful;
  }

  if (manifest->has_alt_catalog_path()) {
    if (!Exists(manifest->certificate().MakeAlternativePath())) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "failed to find alternative certificate link %s",
               manifest->certificate().MakeAlternativePath().c_str());
      return 1;
    }
    if (!Exists(manifest->catalog_hash().MakeAlternativePath())) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "failed to find alternative catalog link %s",
               manifest->catalog_hash().MakeAlternativePath().c_str());
      return 1;
    }
  }

  shash::Any root_hash = manifest->catalog_hash();
  uint64_t root_size = manifest->catalog_size();
  if (tag_name != "") {
    if (!tag_db.IsValid()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "no history");
      return 1;
    }
    history::History::Tag tag;
    const bool retval = tag_db->GetByName(tag_name, &tag);
    if (!retval) {
      LogCvmfs(kLogCvmfs, kLogStderr, "no such tag: %s", tag_name.c_str());
      return 1;
    }
    root_hash = tag.root_hash;
    root_size = tag.size;
    LogCvmfs(kLogCvmfs, kLogStdout, "Inspecting repository tag %s",
             tag_name.c_str());
  }

  const bool is_nested_catalog = (!subtree_path.empty());
  if (is_nested_catalog
      && !FindSubtreeRootCatalog(subtree_path, &root_hash, &root_size)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "cannot find nested catalog at %s",
             subtree_path.c_str());
    return 1;
  }


  catalog::DeltaCounters computed_counters;
  successful = InspectTree(subtree_path,
                           root_hash,
                           root_size,
                           is_nested_catalog,
                           NULL,
                           &computed_counters)
               && successful;

  if (!successful) {
    LogCvmfs(kLogCvmfs, kLogStderr, "CATALOG PROBLEMS OR OTHER ERRORS FOUND");
    return 1;
  }

  LogCvmfs(kLogCvmfs, kLogStdout, "no problems found");
  return 0;
}

}  // namespace swissknife