/**
 * This file is part of the CernVM File System.
 *
 * This tool checks a cvmfs repository for file catalog errors.
 */

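//
// Typical invocation (illustrative sketch only; the server URL, key path, and
// repository name are placeholders and the exact wrapper command may differ
// between installations):
//   cvmfs_swissknife check -r http://stratum0.example.org/cvmfs/repo.example.org \
//                          -k /etc/cvmfs/keys/repo.example.org.pub -N repo.example.org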
#define __STDC_FORMAT_MACROS


#include "swissknife_check.h"

#include <inttypes.h>
#include <unistd.h>

#include <cassert>
#include <map>
#include <queue>
#include <set>
#include <string>
#include <vector>

#include "catalog_sql.h"
#include "compression/compression.h"
#include "file_chunk.h"
#include "history_sqlite.h"
#include "manifest.h"
#include "network/download.h"
#include "reflog.h"
#include "sanitizer.h"
#include "shortstring.h"
#include "util/exception.h"
#include "util/logging.h"
#include "util/pointer.h"
#include "util/posix.h"

using namespace std;  // NOLINT

// for map of duplicate entries; as in kvstore.cc
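// The duplicates_map_ initialized below caches content hashes that have
// already been verified, so that repeated files and chunks are checked
// against the storage only once per run (see Find()).  hasher_any() derives
// the bucket value directly from four bytes of the (already well-mixed)
// content hash.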
static inline uint32_t hasher_any(const shash::Any &key) {
  // We'll just do the same thing as hasher_md5, since every hash is at
  // least as large.
  return *const_cast<uint32_t *>(
      reinterpret_cast<const uint32_t *>(key.digest) + 1);
}


namespace swissknife {

CommandCheck::CommandCheck()
    : check_chunks_(false)
    , no_duplicates_map_(false)
    , is_remote_(false) {
  const shash::Any hash_null;
  duplicates_map_.Init(16, hash_null, hasher_any);
}

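/**
 * Compares two directory entries field by field and logs every difference
 * found.  Used for members of a hardlink group (which must be identical) and
 * for nested catalog transition points, where the nested-catalog flags (and
 * uid/gid) are allowed to differ.
 */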
bool CommandCheck::CompareEntries(const catalog::DirectoryEntry &a,
                                  const catalog::DirectoryEntry &b,
                                  const bool compare_names,
                                  const bool is_transition_point)
{
  typedef catalog::DirectoryEntry::Difference Difference;

  catalog::DirectoryEntry::Differences diffs = a.CompareTo(b);
  if (diffs == Difference::kIdentical) {
    return true;
  }

  // In case of a nested catalog transition point, the controlling flags are
  // supposed to differ.  If this is the only difference, we are done.
  if (is_transition_point &&
      (diffs ^ Difference::kNestedCatalogTransitionFlags) == 0) {
    return true;
  }

  bool retval = true;
  if (compare_names) {
    if (diffs & Difference::kName) {
      LogCvmfs(kLogCvmfs, kLogStderr, "names differ: %s / %s",
               a.name().c_str(), b.name().c_str());
      retval = false;
    }
  }
  if (diffs & Difference::kLinkcount) {
    LogCvmfs(kLogCvmfs, kLogStderr, "linkcounts differ: %u / %u",
             a.linkcount(), b.linkcount());
    retval = false;
  }
  if (diffs & Difference::kHardlinkGroup) {
    LogCvmfs(kLogCvmfs, kLogStderr, "hardlink groups differ: %u / %u",
             a.hardlink_group(), b.hardlink_group());
    retval = false;
  }
  if (diffs & Difference::kSize) {
    LogCvmfs(kLogCvmfs, kLogStderr, "sizes differ: %" PRIu64 " / %" PRIu64,
             a.size(), b.size());
    retval = false;
  }
  if (diffs & Difference::kMode) {
    LogCvmfs(kLogCvmfs, kLogStderr, "modes differ: %u / %u",
             a.mode(), b.mode());
    retval = false;
  }
  if (diffs & Difference::kMtime) {
    LogCvmfs(kLogCvmfs, kLogStderr, "timestamps differ: %lu / %lu",
             a.mtime(), b.mtime());
    retval = false;
  }
  if (diffs & Difference::kChecksum) {
    LogCvmfs(kLogCvmfs, kLogStderr, "content hashes differ: %s / %s",
             a.checksum().ToString().c_str(), b.checksum().ToString().c_str());
    retval = false;
  }
  if (diffs & Difference::kSymlink) {
    LogCvmfs(kLogCvmfs, kLogStderr, "symlinks differ: %s / %s",
             a.symlink().c_str(), b.symlink().c_str());
    retval = false;
  }
  if (diffs & Difference::kExternalFileFlag) {
    LogCvmfs(kLogCvmfs, kLogStderr, "external file flag differs: %d / %d "
             "(%s / %s)", a.IsExternalFile(), b.IsExternalFile(),
             a.name().c_str(), b.name().c_str());
    retval = false;
  }
  if (diffs & Difference::kHasXattrsFlag) {
    LogCvmfs(kLogCvmfs, kLogStderr, "extended attributes differ: %d / %d "
             "(%s / %s)", a.HasXattrs(), b.HasXattrs(),
             a.name().c_str(), b.name().c_str());
    retval = false;
  }
  if (!is_transition_point) {
    if (diffs & Difference::kUid) {
      LogCvmfs(kLogCvmfs, kLogStderr, "uids differ: %d / %d (%s / %s)",
               a.uid(), b.uid(), a.name().c_str(), b.name().c_str());
      retval = false;
    }
    if (diffs & Difference::kGid) {
      LogCvmfs(kLogCvmfs, kLogStderr, "gids differ: %d / %d (%s / %s)",
               a.gid(), b.gid(), a.name().c_str(), b.name().c_str());
      retval = false;
    }
  }

  return retval;
}

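/**
 * Compares the counter values stored in a catalog with the counters
 * recomputed during traversal and logs every field that does not match.
 */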
bool CommandCheck::CompareCounters(const catalog::Counters &a,
                                   const catalog::Counters &b)
{
  const catalog::Counters::FieldsMap map_a = a.GetFieldsMap();
  const catalog::Counters::FieldsMap map_b = b.GetFieldsMap();

  bool retval = true;
  catalog::Counters::FieldsMap::const_iterator i = map_a.begin();
  catalog::Counters::FieldsMap::const_iterator iend = map_a.end();
  for (; i != iend; ++i) {
    catalog::Counters::FieldsMap::const_iterator comp = map_b.find(i->first);
    assert(comp != map_b.end());

    if (*(i->second) != *(comp->second)) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "catalog statistics mismatch: %s (expected: %" PRIu64 " / "
               "in catalog: %" PRIu64 ")",
               comp->first.c_str(), *(i->second), *(comp->second));
      retval = false;
    }
  }

  return retval;
}

/**
 * Checks for the existence of a file, either locally or via an HTTP HEAD
 * request.
 */
bool CommandCheck::Exists(const string &file)
{
  if (!is_remote_) {
    return FileExists(file) || SymlinkExists(file);
  } else {
    const string url = repo_base_path_ + "/" + file;
    LogCvmfs(kLogCvmfs, kLogVerboseMsg, "[Exists::url] %s", url.c_str());
    download::JobInfo head(&url, false);
    return download_manager()->Fetch(&head) == download::kFailOk;
  }
}


/**
 * Copies a file from the repository into a temporary file.
 */
string CommandCheck::FetchPath(const string &path) {
  string tmp_path;
  FILE *f = CreateTempFile(temp_directory_ + "/cvmfstmp", kDefaultFileMode,
                           "w+", &tmp_path);
  assert(f != NULL);

  const string url = repo_base_path_ + "/" + path;
  if (is_remote_) {
    cvmfs::FileSink filesink(f);
    download::JobInfo download_job(&url, false, false, NULL, &filesink);
    download::Failures retval = download_manager()->Fetch(&download_job);
    if (retval != download::kFailOk) {
      PANIC(kLogStderr, "failed to read %s", url.c_str());
    }
  } else {
    bool retval = CopyPath2File(url, f);
    if (!retval) {
      PANIC(kLogStderr, "failed to read %s", url.c_str());
    }
  }

  fclose(f);
  return tmp_path;
}


/**
 * Verifies reflog checksum and looks for presence of the entry points
 * referenced in the manifest.
 */
bool CommandCheck::InspectReflog(
    const shash::Any &reflog_hash,
    manifest::Manifest *manifest)
{
  LogCvmfs(kLogCvmfs, kLogStdout, "Inspecting log of references");
  string reflog_path = FetchPath(".cvmfsreflog");
  shash::Any computed_hash(reflog_hash.algorithm);
  manifest::Reflog::HashDatabase(reflog_path, &computed_hash);
  if (computed_hash != reflog_hash) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "The .cvmfsreflog has unexpected content hash %s (expected %s)",
             computed_hash.ToString().c_str(), reflog_hash.ToString().c_str());
    unlink(reflog_path.c_str());
    return false;
  }

  UniquePtr<manifest::Reflog> reflog(manifest::Reflog::Open(reflog_path));
  assert(reflog.IsValid());
  reflog->TakeDatabaseFileOwnership();

  if (!reflog->ContainsCatalog(manifest->catalog_hash())) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to find catalog root hash %s in .cvmfsreflog",
             manifest->catalog_hash().ToString().c_str());
    return false;
  }

  if (!reflog->ContainsCertificate(manifest->certificate())) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to find certificate hash %s in .cvmfsreflog",
             manifest->certificate().ToString().c_str());
    return false;
  }

  if (!manifest->history().IsNull() &&
      !reflog->ContainsHistory(manifest->history()))
  {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to find tag database's hash %s in .cvmfsreflog",
             manifest->history().ToString().c_str());
    return false;
  }

  if (!manifest->meta_info().IsNull() &&
      !reflog->ContainsMetainfo(manifest->meta_info()))
  {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to find meta info hash %s in .cvmfsreflog",
             manifest->meta_info().ToString().c_str());
    return false;
  }

  return true;
}


/**
 * Verifies the logical consistency of the tag database.
 */
bool CommandCheck::InspectHistory(history::History *history) {
  LogCvmfs(kLogCvmfs, kLogStdout, "Inspecting tag database");
  bool retval;
  vector<history::History::Tag> tags;
  retval = history->List(&tags);
  if (!retval) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to enumerate tags");
    return false;
  }
  vector<history::History::Branch> branches;
  retval = history->ListBranches(&branches);
  if (!retval) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to enumerate branches");
    return false;
  }

  bool result = true;

  map<string, uint64_t> initial_revisions;
  sanitizer::BranchSanitizer sanitizer;
  for (unsigned i = 0; i < branches.size(); ++i) {
    if (!sanitizer.IsValid(branches[i].branch)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "invalid branch name: %s",
               branches[i].branch.c_str());
      result = false;
    }
    initial_revisions[branches[i].branch] = branches[i].initial_revision;
  }

  set<string> used_branches;  // all branches referenced in tag db
  // TODO(jblomer): same root hash implies same size and revision
  for (unsigned i = 0; i < tags.size(); ++i) {
    used_branches.insert(tags[i].branch);
    const map<string, uint64_t>::const_iterator iter =
        initial_revisions.find(tags[i].branch);
    if (iter == initial_revisions.end()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "invalid branch %s in tag %s",
               tags[i].branch.c_str(), tags[i].name.c_str());
      result = false;
    } else {
      if (tags[i].revision < iter->second) {
        LogCvmfs(kLogCvmfs, kLogStderr, "invalid revision %" PRIu64
                 " of tag %s", tags[i].revision, tags[i].name.c_str());
        result = false;
      }
    }
  }

  if (used_branches.size() != branches.size()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "unused, dangling branches stored");
    result = false;
  }

  return result;
}


/**
 * Recursive catalog walk-through
 *
 * TODO(vavolkl): This method is large and does a lot of checks
 * that could be split into smaller ones.
 *
 */
bool CommandCheck::Find(const catalog::Catalog *catalog,
                        const PathString &path,
                        catalog::DeltaCounters *computed_counters,
                        set<PathString> *bind_mountpoints)
{
  catalog::DirectoryEntryList entries;
  catalog::DirectoryEntry this_directory;

  if (!catalog->LookupPath(path, &this_directory)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to lookup %s",
             path.c_str());
    return false;
  }
  if (!catalog->ListingPath(path, &entries)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to list %s",
             path.c_str());
    return false;
  }

  uint32_t num_subdirs = 0;
  bool retval = true;
  typedef map< uint32_t, vector<catalog::DirectoryEntry> > HardlinkMap;
  HardlinkMap hardlinks;
  bool found_nested_marker = false;

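  // Walk over all entries of this directory level.  Structural checks
  // (name, linkcount, content hash, type-specific rules) are always applied;
  // presence checks against the backend storage only run with check_chunks_.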
  for (unsigned i = 0; i < entries.size(); ++i) {
    // for performance reasons, keep track of files already checked
    // and only run requests once per hash
    const bool entry_needs_check =
        !entries[i].checksum().IsNull() && !entries[i].IsExternalFile() &&
        // fallback cli option can force the entry to be checked
        (no_duplicates_map_ ||
         !duplicates_map_.Contains(entries[i].checksum()));
    if (entry_needs_check && !no_duplicates_map_)
      duplicates_map_.Insert(entries[i].checksum(), 1);

    PathString full_path(path);
    full_path.Append("/", 1);
    full_path.Append(entries[i].name().GetChars(),
                     entries[i].name().GetLength());
    LogCvmfs(kLogCvmfs, kLogVerboseMsg, "[path] %s [needs check] %i",
             full_path.c_str(), entry_needs_check);


    // Name must not be empty
    if (entries[i].name().IsEmpty()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "empty path at %s",
               full_path.c_str());
      retval = false;
    }

    // Catalog markers should indicate nested catalogs
    if (entries[i].name() == NameString(string(".cvmfscatalog"))) {
      if (catalog->mountpoint() != path) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "found abandoned nested catalog marker at %s",
                 full_path.c_str());
        retval = false;
      }
      found_nested_marker = true;
    }

    // Check if checksum is not null
    if (entries[i].IsRegular() && !entries[i].IsChunkedFile() &&
        entries[i].checksum().IsNull())
    {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "regular file pointing to zero-hash: '%s'", full_path.c_str());
      retval = false;
    }

    // Check if the chunk is there
    if (check_chunks_ && entry_needs_check)
    {
      string chunk_path = "data/" + entries[i].checksum().MakePath();
      if (entries[i].IsDirectory())
        chunk_path += shash::kSuffixMicroCatalog;
      if (!Exists(chunk_path)) {
        LogCvmfs(kLogCvmfs, kLogStderr, "data chunk %s (%s) missing",
                 entries[i].checksum().ToString().c_str(), full_path.c_str());
        retval = false;
      }
    }

    // Add hardlinks to counting map
    if ((entries[i].linkcount() > 1) && !entries[i].IsDirectory()) {
      if (entries[i].hardlink_group() == 0) {
        LogCvmfs(kLogCvmfs, kLogStderr, "invalid hardlink group for %s",
                 full_path.c_str());
        retval = false;
      } else {
        HardlinkMap::iterator hardlink_group =
            hardlinks.find(entries[i].hardlink_group());
        if (hardlink_group == hardlinks.end()) {
          hardlinks[entries[i].hardlink_group()];
          hardlinks[entries[i].hardlink_group()].push_back(entries[i]);
        } else {
          if (!CompareEntries(entries[i], (hardlink_group->second)[0], false)) {
            LogCvmfs(kLogCvmfs, kLogStderr, "hardlink %s doesn't match",
                     full_path.c_str());
            retval = false;
          }
          hardlink_group->second.push_back(entries[i]);
        }  // Hardlink added to map
      }  // Hardlink group > 0
    }  // Hardlink found

    // For any kind of entry, the linkcount should be > 0
    if (entries[i].linkcount() == 0) {
      LogCvmfs(kLogCvmfs, kLogStderr, "Entry %s has linkcount 0.",
               entries[i].name().c_str());
      retval = false;
    }

    // Checks depending on the entry type
    if (!entries[i].IsRegular()) {
      if (entries[i].IsDirectIo()) {
        LogCvmfs(kLogCvmfs, kLogStderr, "invalid direct i/o flag found: %s",
                 full_path.c_str());
        retval = false;
      }
    }
    if (entries[i].IsDirectory()) {
      computed_counters->self.directories++;
      num_subdirs++;
      // Directory size
      // if (entries[i].size() < 4096) {
      //   LogCvmfs(kLogCvmfs, kLogStderr, "invalid file size for %s",
      //            full_path.c_str());
      //   retval = false;
      // }
      // No directory hardlinks
      if (entries[i].hardlink_group() != 0) {
        LogCvmfs(kLogCvmfs, kLogStderr, "directory hardlink found at %s",
                 full_path.c_str());
        retval = false;
      }
      if (entries[i].IsNestedCatalogMountpoint() ||
          entries[i].IsBindMountpoint())
      {
        // Find transition point
        if (entries[i].IsNestedCatalogMountpoint())
          computed_counters->self.nested_catalogs++;
        shash::Any tmp;
        uint64_t tmp2;
        PathString mountpoint(full_path);
        if (!catalog->FindNested(mountpoint, &tmp, &tmp2)) {
          LogCvmfs(kLogCvmfs, kLogStderr, "nested catalog at %s not registered",
                   full_path.c_str());
          retval = false;
        }

        // check that the nested mountpoint is empty in the current catalog
        catalog::DirectoryEntryList nested_entries;
        if (catalog->ListingPath(full_path, &nested_entries) &&
            !nested_entries.empty()) {
          LogCvmfs(kLogCvmfs, kLogStderr, "non-empty nested catalog mountpoint "
                   "at %s.",
                   full_path.c_str());
          retval = false;
        }

        if (entries[i].IsBindMountpoint()) {
          bind_mountpoints->insert(full_path);
          if (entries[i].IsNestedCatalogMountpoint()) {
            LogCvmfs(kLogCvmfs, kLogStderr,
                     "bind mountpoint and nested mountpoint mutually exclusive"
                     " at %s.", full_path.c_str());
            retval = false;
          }
        }
      } else {
        // Recurse
        if (!Find(catalog, full_path, computed_counters, bind_mountpoints))
          retval = false;
      }
    } else if (entries[i].IsLink()) {
      computed_counters->self.symlinks++;
      // No hash for symbolic links
      if (!entries[i].checksum().IsNull()) {
        LogCvmfs(kLogCvmfs, kLogStderr, "symbolic links with hash at %s",
                 full_path.c_str());
        retval = false;
      }
      // Right size of symbolic link?
      if (entries[i].size() != entries[i].symlink().GetLength()) {
        LogCvmfs(kLogCvmfs, kLogStderr, "wrong symbolic link size for %s; "
                 "expected %u, got %lu", full_path.c_str(),
                 entries[i].symlink().GetLength(), entries[i].size());
        retval = false;
      }
    } else if (entries[i].IsRegular()) {
      computed_counters->self.regular_files++;
      computed_counters->self.file_size += entries[i].size();
    } else if (entries[i].IsSpecial()) {
      computed_counters->self.specials++;
      // Size zero for special files
      if (entries[i].size() != 0) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "unexpected non-zero special file size %s",
                 full_path.c_str());
        retval = false;
      }
      // No hash for special files
      if (!entries[i].checksum().IsNull()) {
        LogCvmfs(kLogCvmfs, kLogStderr, "special file with hash at %s",
                 full_path.c_str());
        retval = false;
      }
      // No symlink
      if (entries[i].symlink().GetLength() > 0) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "special file with non-zero symlink at %s", full_path.c_str());
        retval = false;
      }
    } else {
      LogCvmfs(kLogCvmfs, kLogStderr, "unknown file type %s",
               full_path.c_str());
      retval = false;
    }

    if (entries[i].HasXattrs()) {
      computed_counters->self.xattrs++;
    }

    if (entries[i].IsExternalFile()) {
      computed_counters->self.externals++;
      computed_counters->self.external_file_size += entries[i].size();
      if (!entries[i].IsRegular()) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "only regular files can be external: %s", full_path.c_str());
        retval = false;
      }
    }

    // checking file chunk integrity
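    // For a chunked file, the data is stored as an ordered list of chunks.
    // The checks below verify that the chunk offsets are contiguous, that the
    // chunk sizes add up to the file size and, when check_chunks_ is set,
    // that each (non-external) chunk exists in the data store.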
    if (entries[i].IsChunkedFile()) {
      FileChunkList chunks;
      catalog->ListPathChunks(full_path, entries[i].hash_algorithm(), &chunks);

      computed_counters->self.chunked_files++;
      computed_counters->self.chunked_file_size += entries[i].size();
      computed_counters->self.file_chunks += chunks.size();

      // do we find file chunks for the chunked file in this catalog?
      if (chunks.size() == 0) {
        LogCvmfs(kLogCvmfs, kLogStderr, "no file chunks found for big file %s",
                 full_path.c_str());
        retval = false;
      }

      size_t aggregated_file_size = 0;
      off_t next_offset = 0;

      for (unsigned j = 0; j < chunks.size(); ++j) {
        FileChunk this_chunk = chunks.At(j);
        // check if the chunk boundaries fit together...
        if (next_offset != this_chunk.offset()) {
          LogCvmfs(kLogCvmfs, kLogStderr, "misaligned chunk offsets for %s",
                   full_path.c_str());
          retval = false;
        }
        next_offset = this_chunk.offset() + this_chunk.size();
        aggregated_file_size += this_chunk.size();

        // are all data chunks in the data store?
        if (check_chunks_ && !entries[i].IsExternalFile()) {
          const shash::Any &chunk_hash = this_chunk.content_hash();
          // for performance reasons, only perform the check once
          // and skip if the hash has been checked before
          bool chunk_needs_check = true;
          if (!no_duplicates_map_ && !duplicates_map_.Contains(chunk_hash)) {
            duplicates_map_.Insert(chunk_hash, 1);
          } else if (!no_duplicates_map_) {
            chunk_needs_check = false;
          }
          if (chunk_needs_check) {
            const string chunk_path = "data/" + chunk_hash.MakePath();
            if (!Exists(chunk_path)) {
              LogCvmfs(kLogCvmfs, kLogStderr, "partial data chunk %s (%s -> "
                       "offset: %ld | size: %lu) missing",
                       this_chunk.content_hash().ToStringWithSuffix().c_str(),
                       full_path.c_str(),
                       this_chunk.offset(),
                       this_chunk.size());
              retval = false;
            }
          }
        }
      }

      // is the aggregated chunk size equal to the actual file size?
      if (aggregated_file_size != entries[i].size()) {
        LogCvmfs(kLogCvmfs, kLogStderr, "chunks of file %s produce a size "
                 "mismatch. Calculated %zu bytes | %lu "
                 "bytes expected",
                 full_path.c_str(),
                 aggregated_file_size,
                 entries[i].size());
        retval = false;
      }
    }
  }  // Loop through entries

  // Check if nested catalog marker has been found
  if (!path.IsEmpty() && (path == catalog->mountpoint()) &&
      !found_nested_marker)
  {
    LogCvmfs(kLogCvmfs, kLogStderr, "nested catalog without marker at %s",
             path.c_str());
    retval = false;
  }

  // Check directory linkcount
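  // (a directory's linkcount is the number of its subdirectories plus 2:
  // the parent's entry and its own "." link, plus one ".." link from each
  // subdirectory)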
  if (this_directory.linkcount() != num_subdirs + 2) {
    LogCvmfs(kLogCvmfs, kLogStderr, "wrong linkcount for %s; "
             "expected %u, got %u",
             path.c_str(), num_subdirs + 2, this_directory.linkcount());
    retval = false;
  }

  // Check hardlink linkcounts
  for (HardlinkMap::const_iterator i = hardlinks.begin(),
       iEnd = hardlinks.end(); i != iEnd; ++i)
  {
    if (i->second[0].linkcount() != i->second.size()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "hardlink linkcount wrong for %s, "
               "expected %lu, got %u",
               (path.ToString() + "/" + i->second[0].name().ToString()).c_str(),
               i->second.size(), i->second[0].linkcount());
      retval = false;
    }
  }

  return retval;
}

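/**
 * Fetches the object with the given content hash from the remote backend
 * storage into the temporary directory and returns the local path of the
 * downloaded copy ("" on failure).
 */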
string CommandCheck::DownloadPiece(const shash::Any catalog_hash) {
  string source = "data/" + catalog_hash.MakePath();
  const string dest = temp_directory_ + "/" + catalog_hash.ToString();
  const string url = repo_base_path_ + "/" + source;

  cvmfs::PathSink pathsink(dest);
  download::JobInfo download_catalog(&url, true, false, &catalog_hash,
                                     &pathsink);
  download::Failures retval = download_manager()->Fetch(&download_catalog);
  if (retval != download::kFailOk) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to download object %s (%d)",
             catalog_hash.ToString().c_str(), retval);
    return "";
  }

  return dest;
}

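/**
 * Local counterpart of DownloadPiece(): decompresses the object with the
 * given content hash from the local repository storage into the temporary
 * directory and returns the path of the decompressed copy ("" on failure).
 */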
string CommandCheck::DecompressPiece(const shash::Any catalog_hash) {
  string source = "data/" + catalog_hash.MakePath();
  const string dest = temp_directory_ + "/" + catalog_hash.ToString();
  if (!zlib::DecompressPath2Path(source, dest))
    return "";

  return dest;
}

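/**
 * Obtains a catalog by content hash (downloaded for remote repositories,
 * decompressed from local storage otherwise), attaches it as a temporary
 * SQLite catalog, and verifies the file size against the expected
 * catalog_size when one is given.  Returns NULL on failure.
 */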
catalog::Catalog* CommandCheck::FetchCatalog(const string &path,
                                             const shash::Any &catalog_hash,
                                             const uint64_t catalog_size) {
  string tmp_file;
  if (!is_remote_)
    tmp_file = DecompressPiece(catalog_hash);
  else
    tmp_file = DownloadPiece(catalog_hash);

  if (tmp_file == "") {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to load catalog %s",
             catalog_hash.ToString().c_str());
    return NULL;
  }

  catalog::Catalog *catalog =
      catalog::Catalog::AttachFreely(path, tmp_file, catalog_hash);
  int64_t catalog_file_size = GetFileSize(tmp_file);
  if (catalog_file_size <= 0) {
    LogCvmfs(kLogCvmfs, kLogStderr, "Error downloading catalog %s at %s %s",
             catalog_hash.ToString().c_str(), path.c_str(), tmp_file.c_str());
    assert(catalog_file_size > 0);
  }
  unlink(tmp_file.c_str());

  if ((catalog_size > 0) && (uint64_t(catalog_file_size) != catalog_size)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "catalog file size mismatch, "
             "expected %" PRIu64 ", got %" PRIu64,
             catalog_size, catalog_file_size);
    delete catalog;
    return NULL;
  }

  return catalog;
}

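/**
 * Walks from the repository root along subtree_path and resolves the hash
 * and size of the nested catalog mounted at that path (used for the -s
 * subtree option).  Returns false if no such nested catalog is registered.
 */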
bool CommandCheck::FindSubtreeRootCatalog(const string &subtree_path,
                                          shash::Any *root_hash,
                                          uint64_t *root_size) {
  catalog::Catalog *current_catalog = FetchCatalog("", *root_hash);
  if (current_catalog == NULL) {
    return false;
  }

  typedef vector<string> Tokens;
  const Tokens path_tokens = SplitString(subtree_path, '/');

  string current_path = "";

  Tokens::const_iterator i = path_tokens.begin();
  Tokens::const_iterator iend = path_tokens.end();
  for (; i != iend; ++i) {
    if (i->empty()) {
      continue;
    }

    current_path += "/" + *i;
    if (current_catalog->FindNested(PathString(current_path),
                                    root_hash,
                                    root_size)) {
      delete current_catalog;

      if (current_path.length() < subtree_path.length()) {
        current_catalog = FetchCatalog(current_path, *root_hash);
        if (current_catalog == NULL) {
          break;
        }
      } else {
        return true;
      }
    }
  }
  return false;
}


/**
 * Recursion on nested catalog level. No ownership of computed_counters.
 */
bool CommandCheck::InspectTree(const string &path,
                               const shash::Any &catalog_hash,
                               const uint64_t catalog_size,
                               const bool is_nested_catalog,
                               const catalog::DirectoryEntry *transition_point,
                               catalog::DeltaCounters *computed_counters)
{
  LogCvmfs(kLogCvmfs, kLogStdout | kLogInform, "[inspecting catalog] %s at %s",
           catalog_hash.ToString().c_str(), path == "" ? "/" : path.c_str());

  const catalog::Catalog *catalog = FetchCatalog(path,
                                                 catalog_hash,
                                                 catalog_size);
  if (catalog == NULL) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to open catalog %s",
             catalog_hash.ToString().c_str());
    return false;
  }

  int retval = true;

  if (catalog->root_prefix() != PathString(path.data(), path.length())) {
    LogCvmfs(kLogCvmfs, kLogStderr, "root prefix mismatch; "
             "expected %s, got %s",
             path.c_str(), catalog->root_prefix().c_str());
    retval = false;
  }

  // Check transition point
  catalog::DirectoryEntry root_entry;
  if (!catalog->LookupPath(catalog->root_prefix(), &root_entry)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to lookup root entry (%s)",
             path.c_str());
    retval = false;
  }
  if (!root_entry.IsDirectory()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "root entry not a directory (%s)",
             path.c_str());
    retval = false;
  }
  if (is_nested_catalog) {
    if (transition_point != NULL &&
        !CompareEntries(*transition_point, root_entry, true, true)) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "transition point and root entry differ (%s)", path.c_str());
      retval = false;
    }
    if (!root_entry.IsNestedCatalogRoot()) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "nested catalog root expected but not found (%s)", path.c_str());
      retval = false;
    }
  } else {
    if (root_entry.IsNestedCatalogRoot()) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "nested catalog root found but not expected (%s)", path.c_str());
      retval = false;
    }
  }

  // Traverse the catalog
  set<PathString> bind_mountpoints;
  if (!Find(catalog, PathString(path.data(), path.length()),
            computed_counters, &bind_mountpoints))
  {
    retval = false;
  }

  // Check number of entries
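  // (the leading 1 accounts for the catalog's own root entry, which is not
  // included in the per-type counters computed by Find())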
  if (root_entry.HasXattrs())
    computed_counters->self.xattrs++;
  const uint64_t num_found_entries =
      1 +
      computed_counters->self.regular_files +
      computed_counters->self.symlinks +
      computed_counters->self.specials +
      computed_counters->self.directories;
  if (num_found_entries != catalog->GetNumEntries()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "dangling entries in catalog, "
             "expected %" PRIu64 ", got %" PRIu64,
             catalog->GetNumEntries(), num_found_entries);
    retval = false;
  }

  // Recurse into nested catalogs
  const catalog::Catalog::NestedCatalogList &nested_catalogs =
      catalog->ListNestedCatalogs();
  const catalog::Catalog::NestedCatalogList own_nested_catalogs =
      catalog->ListOwnNestedCatalogs();
  if (own_nested_catalogs.size() !=
      static_cast<uint64_t>(computed_counters->self.nested_catalogs))
  {
    LogCvmfs(kLogCvmfs, kLogStderr, "number of nested catalogs does not match;"
             " expected %lu, got %lu", computed_counters->self.nested_catalogs,
             own_nested_catalogs.size());
    retval = false;
  }
  set<PathString> nested_catalog_paths;
  for (catalog::Catalog::NestedCatalogList::const_iterator i =
       nested_catalogs.begin(), iEnd = nested_catalogs.end(); i != iEnd; ++i)
  {
    nested_catalog_paths.insert(i->mountpoint);
  }
  if (nested_catalog_paths.size() != nested_catalogs.size()) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "duplicates among nested catalogs and bind mountpoints");
    retval = false;
  }

  for (catalog::Catalog::NestedCatalogList::const_iterator i =
       nested_catalogs.begin(), iEnd = nested_catalogs.end(); i != iEnd; ++i)
  {
    if (bind_mountpoints.find(i->mountpoint) != bind_mountpoints.end()) {
      catalog::DirectoryEntry bind_mountpoint;
      PathString mountpoint("/" + i->mountpoint.ToString().substr(1));
      if (!catalog->LookupPath(mountpoint, &bind_mountpoint)) {
        LogCvmfs(kLogCvmfs, kLogStderr, "failed to lookup bind mountpoint %s",
                 mountpoint.c_str());
        retval = false;
      }
      LogCvmfs(kLogCvmfs, kLogDebug, "skipping bind mountpoint %s",
               i->mountpoint.c_str());
      continue;
    }
    catalog::DirectoryEntry nested_transition_point;
    if (!catalog->LookupPath(i->mountpoint, &nested_transition_point)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to lookup transition point %s",
               i->mountpoint.c_str());
      retval = false;
    } else {
      catalog::DeltaCounters nested_counters;
      const bool is_nested = true;
      if (!InspectTree(i->mountpoint.ToString(), i->hash, i->size, is_nested,
                       &nested_transition_point, &nested_counters))
        retval = false;
      nested_counters.PopulateToParent(computed_counters);
    }
  }

  // Check statistics counters
  // Additionally account for root directory
  computed_counters->self.directories++;
  catalog::Counters compare_counters;
  compare_counters.ApplyDelta(*computed_counters);
  const catalog::Counters stored_counters = catalog->GetCounters();
  if (!CompareCounters(compare_counters, stored_counters)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "statistics counter mismatch [%s]",
             catalog_hash.ToString().c_str());
    retval = false;
  }

  delete catalog;
  return retval;
}


int CommandCheck::Main(const swissknife::ArgumentList &args) {
  string tag_name;
  string subtree_path = "";
  string pubkey_path = "";
  string repo_name = "";
  string reflog_chksum_path = "";

  temp_directory_ = (args.find('t') != args.end()) ? *args.find('t')->second
                                                   : "/tmp";
  if (args.find('n') != args.end())
    tag_name = *args.find('n')->second;
  if (args.find('c') != args.end())
    check_chunks_ = true;
  if (args.find('d') != args.end())
    no_duplicates_map_ = true;
  if (args.find('l') != args.end()) {
    unsigned log_level =
        kLogLevel0 << String2Uint64(*args.find('l')->second);
    if (log_level > kLogNone) {
      LogCvmfs(kLogCvmfs, kLogStderr, "invalid log level");
      return 1;
    }
    SetLogVerbosity(static_cast<LogLevels>(log_level));
  }
  if (args.find('k') != args.end())
    pubkey_path = *args.find('k')->second;
  if (DirectoryExists(pubkey_path))
    pubkey_path = JoinStrings(FindFilesBySuffix(pubkey_path, ".pub"), ":");
  if (args.find('N') != args.end())
    repo_name = *args.find('N')->second;

  repo_base_path_ = MakeCanonicalPath(*args.find('r')->second);
  if (args.find('s') != args.end())
    subtree_path = MakeCanonicalPath(*args.find('s')->second);
  if (args.find('R') != args.end())
    reflog_chksum_path = *args.find('R')->second;

  // Repository can be HTTP address or on local file system
  is_remote_ = IsHttpUrl(repo_base_path_);

  // initialize the (swissknife global) download and signature managers
  if (is_remote_) {
    const bool follow_redirects = (args.count('L') > 0);
    const string proxy = (args.count('@') > 0) ? *args.find('@')->second : "";
    if (!this->InitDownloadManager(follow_redirects, proxy)) {
      return 1;
    }

    if (pubkey_path.empty() || repo_name.empty()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "please provide pubkey and repo name for "
               "remote repositories");
      return 1;
    }

    if (!this->InitSignatureManager(pubkey_path)) {
      return 1;
    }
  }

  // Load Manifest
  UniquePtr<manifest::Manifest> manifest;
  bool successful = true;

  if (is_remote_) {
    manifest = FetchRemoteManifest(repo_base_path_, repo_name);
  } else {
    if (chdir(repo_base_path_.c_str()) != 0) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to switch to directory %s",
               repo_base_path_.c_str());
      return 1;
    }
    manifest = OpenLocalManifest(".cvmfspublished");
  }

  if (!manifest.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to load repository manifest");
    return 1;
  }

  // Check meta-info object
  if (!manifest->meta_info().IsNull()) {
    string tmp_file;
    if (!is_remote_)
      tmp_file = DecompressPiece(manifest->meta_info());
    else
      tmp_file = DownloadPiece(manifest->meta_info());
    if (tmp_file == "") {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to load repository metainfo %s",
               manifest->meta_info().ToString().c_str());
      return 1;
    }
    unlink(tmp_file.c_str());
  }

  shash::Any reflog_hash;
  if (!reflog_chksum_path.empty()) {
    if (!manifest::Reflog::ReadChecksum(reflog_chksum_path, &reflog_hash)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to read reflog checksum file");
      return 1;
    }
  } else {
    reflog_hash = manifest->reflog_hash();
  }

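  // The reflog is optional.  If it is present, it must match the known
  // checksum, taken either from the local checksum file (-R) or from the
  // manifest.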
  if (Exists(".cvmfsreflog")) {
    if (reflog_hash.IsNull()) {
      // If there is a reflog, we want to check it
      LogCvmfs(kLogCvmfs, kLogStderr,
               ".cvmfsreflog present but no checksum provided, aborting");
      return 1;
    }
    bool retval = InspectReflog(reflog_hash, manifest.weak_ref());
    if (!retval) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to verify reflog");
      return 1;
    }
  } else {
    if (!reflog_hash.IsNull()) {
      // There is a checksum but no reflog.  The checksum may simply be the
      // one from the manifest, referring to the stratum 0 reflog; only a
      // locally provided checksum (-R) without a reflog is treated as an
      // error.
      if (!reflog_chksum_path.empty()) {
        LogCvmfs(kLogCvmfs, kLogStderr,
                 "local reflog checksum set but reflog itself is missing, "
                 "aborting");
        return 1;
      }
    }
  }

  // Load history
  UniquePtr<history::History> tag_db;
  if (!manifest->history().IsNull()) {
    string tmp_file;
    if (!is_remote_)
      tmp_file = DecompressPiece(manifest->history());
    else
      tmp_file = DownloadPiece(manifest->history());
    if (tmp_file == "") {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to load history database %s",
               manifest->history().ToString().c_str());
      return 1;
    }
    tag_db = history::SqliteHistory::Open(tmp_file);
    if (!tag_db.IsValid()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to open history database %s",
               manifest->history().ToString().c_str());
      return 1;
    }
    tag_db->TakeDatabaseFileOwnership();
    successful = InspectHistory(tag_db.weak_ref()) && successful;
  }

  if (manifest->has_alt_catalog_path()) {
    if (!Exists(manifest->certificate().MakeAlternativePath())) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "failed to find alternative certificate link %s",
               manifest->certificate().MakeAlternativePath().c_str());
      return 1;
    }
    if (!Exists(manifest->catalog_hash().MakeAlternativePath())) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "failed to find alternative catalog link %s",
               manifest->catalog_hash().MakeAlternativePath().c_str());
      return 1;
    }
  }

  shash::Any root_hash = manifest->catalog_hash();
  uint64_t root_size = manifest->catalog_size();
  if (tag_name != "") {
    if (!tag_db.IsValid()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "no history");
      return 1;
    }
    history::History::Tag tag;
    const bool retval = tag_db->GetByName(tag_name, &tag);
    if (!retval) {
      LogCvmfs(kLogCvmfs, kLogStderr, "no such tag: %s", tag_name.c_str());
      return 1;
    }
    root_hash = tag.root_hash;
    root_size = tag.size;
    LogCvmfs(kLogCvmfs, kLogStdout, "Inspecting repository tag %s",
             tag_name.c_str());
  }

  const bool is_nested_catalog = (!subtree_path.empty());
  if (is_nested_catalog && !FindSubtreeRootCatalog(subtree_path,
                                                   &root_hash,
                                                   &root_size)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "cannot find nested catalog at %s",
             subtree_path.c_str());
    return 1;
  }


  catalog::DeltaCounters computed_counters;
  successful = InspectTree(subtree_path,
                           root_hash,
                           root_size,
                           is_nested_catalog,
                           NULL,
                           &computed_counters) && successful;

  if (!successful) {
    LogCvmfs(kLogCvmfs, kLogStderr, "CATALOG PROBLEMS OR OTHER ERRORS FOUND");
    return 1;
  }

  LogCvmfs(kLogCvmfs, kLogStdout, "no problems found");
  return 0;
}

}  // namespace swissknife