GCC Code Coverage Report | |||||||||||||||||||||
|
|||||||||||||||||||||
Line | Branch | Exec | Source |
1 |
/** |
||
2 |
* This file is part of the CernVM File System. |
||
3 |
*/ |
||
4 |
|||
5 |
#define __STDC_FORMAT_MACROS |
||
6 |
|||
7 |
#include "catalog_rw.h" |
||
8 |
|||
9 |
#include <inttypes.h> |
||
10 |
#include <cstdio> |
||
11 |
#include <cstdlib> |
||
12 |
|||
13 |
#include "logging.h" |
||
14 |
#include "util_concurrency.h" |
||
15 |
#include "xattr.h" |
||
16 |
|||
17 |
using namespace std; // NOLINT |
||
18 |
|||
19 |
namespace catalog { |
||
20 |
|||
21 |
const double WritableCatalog::kMaximalFreePageRatio = 0.20; |
||
22 |
const double WritableCatalog::kMaximalRowIdWasteRatio = 0.25; |
||
23 |
|||
24 |
|||
25 |
39 |
/**
 * Constructs a writable catalog on top of the read-only Catalog base class.
 * All prepared-statement pointers start out NULL; they are created lazily by
 * InitPreparedStatements() once the database is attached.
 *
 * @param path          mountpoint path of this catalog
 * @param catalog_hash  content hash of the catalog database file
 * @param parent        parent catalog in the catalog tree (may be NULL)
 * @param is_not_root   true if this is not the root catalog of the tree
 */
WritableCatalog::WritableCatalog(const string &path,
                                 const shash::Any &catalog_hash,
                                 Catalog *parent,
                                 const bool is_not_root) :
  Catalog(PathString(path.data(), path.length()),
          catalog_hash,  // This is 0 for a newly created catalog!
          parent,
          is_not_root),
  sql_insert_(NULL),
  sql_unlink_(NULL),
  sql_touch_(NULL),
  sql_update_(NULL),
  sql_chunk_insert_(NULL),
  sql_chunks_remove_(NULL),
  sql_chunks_count_(NULL),
  sql_max_link_id_(NULL),
  sql_inc_linkcount_(NULL),
  dirty_(false)
{
  // Counter of dirty child catalogs; updated from multiple places, hence
  // atomic initialization.
  atomic_init32(&dirty_children_);
}
|
46 |
|||
47 |
|||
48 |
16 |
/**
 * Creates a new WritableCatalog object and initializes it standalone from the
 * given database file.  On failure the half-constructed object is destroyed.
 *
 * @return the attached catalog, or NULL if initialization failed;
 *         the caller takes ownership of the returned object
 */
WritableCatalog *WritableCatalog::AttachFreely(const string &root_path,
                                               const string &file,
                                               const shash::Any &catalog_hash,
                                               Catalog *parent,
                                               const bool is_not_root) {
  WritableCatalog *result =
    new WritableCatalog(root_path, catalog_hash, parent, is_not_root);
  if (result->InitStandalone(file))
    return result;

  // Initialization failed: do not leak the partially attached catalog
  delete result;
  return NULL;
}
||
62 |
|||
63 |
|||
64 |
78 |
WritableCatalog::~WritableCatalog() {
  // CAUTION HOT!
  // (see Catalog.h - near the definition of FinalizePreparedStatements)
  // The write-specific prepared statements must be finalized here, not in the
  // base class destructor.
  FinalizePreparedStatements();
}
69 |
|||
70 |
|||
71 |
31 |
void WritableCatalog::Transaction() { |
|
72 |
LogCvmfs(kLogCatalog, kLogVerboseMsg, "opening SQLite transaction for '%s'", |
||
73 |
31 |
mountpoint().c_str()); |
|
74 |
31 |
const bool retval = database().BeginTransaction(); |
|
75 |
✗✓ | 31 |
assert(retval == true); |
76 |
31 |
} |
|
77 |
|||
78 |
|||
79 |
31 |
void WritableCatalog::Commit() { |
|
80 |
LogCvmfs(kLogCatalog, kLogVerboseMsg, "closing SQLite transaction for '%s'", |
||
81 |
31 |
mountpoint().c_str()); |
|
82 |
31 |
const bool retval = database().CommitTransaction(); |
|
83 |
✗✓ | 31 |
assert(retval == true); |
84 |
31 |
dirty_ = false; |
|
85 |
31 |
} |
|
86 |
|||
87 |
|||
88 |
39 |
void WritableCatalog::InitPreparedStatements() { |
|
89 |
39 |
Catalog::InitPreparedStatements(); // polymorphism: up call |
|
90 |
|||
91 |
39 |
bool retval = SqlCatalog(database(), "PRAGMA foreign_keys = ON;").Execute(); |
|
92 |
✗✓ | 39 |
assert(retval); |
93 |
39 |
sql_insert_ = new SqlDirentInsert (database()); |
|
94 |
39 |
sql_unlink_ = new SqlDirentUnlink (database()); |
|
95 |
39 |
sql_touch_ = new SqlDirentTouch (database()); |
|
96 |
39 |
sql_update_ = new SqlDirentUpdate (database()); |
|
97 |
39 |
sql_chunk_insert_ = new SqlChunkInsert (database()); |
|
98 |
39 |
sql_chunks_remove_ = new SqlChunksRemove (database()); |
|
99 |
39 |
sql_chunks_count_ = new SqlChunksCount (database()); |
|
100 |
39 |
sql_max_link_id_ = new SqlMaxHardlinkGroup (database()); |
|
101 |
39 |
sql_inc_linkcount_ = new SqlIncLinkcount (database()); |
|
102 |
39 |
} |
|
103 |
|||
104 |
|||
105 |
39 |
/**
 * Releases all write-specific prepared statements.
 * Deliberately no up call to the base class (see Catalog.h -
 * near the definition of this method).  The deletions are independent of
 * each other, so their order does not matter.
 */
void WritableCatalog::FinalizePreparedStatements() {
  delete sql_inc_linkcount_;
  delete sql_max_link_id_;
  delete sql_chunks_count_;
  delete sql_chunks_remove_;
  delete sql_chunk_insert_;
  delete sql_update_;
  delete sql_touch_;
  delete sql_unlink_;
  delete sql_insert_;
}
|
118 |
|||
119 |
|||
120 |
/**
 * Find out the maximal hardlink group id in this catalog.
 *
 * NOTE(review): if FetchRow() yields no row, the local -1 is implicitly
 * converted to uint32_t (i.e. 0xFFFFFFFF) on return — confirm callers
 * expect this sentinel behavior.
 */
uint32_t WritableCatalog::GetMaxLinkId() const {
  int result = -1;

  if (sql_max_link_id_->FetchRow()) {
    result = sql_max_link_id_->GetMaxGroupId();
  }
  // Reset so the prepared statement can be re-executed later
  sql_max_link_id_->Reset();

  return result;
}
||
133 |
|||
134 |
|||
135 |
/**
 * Adds a directory entry.
 * @param entry the DirectoryEntry to add to the catalog
 * @param xattrs the extended attributes of the entry (may be empty)
 * @param entry_path the full path of the DirectoryEntry to add
 * @param parent_path the full path of the containing directory
 */
void WritableCatalog::AddEntry(
  const DirectoryEntry &entry,
  const XattrList &xattrs,
  const string &entry_path,
  const string &parent_path)
{
  SetDirty();

  LogCvmfs(kLogCatalog, kLogVerboseMsg, "add entry '%s' to '%s'",
           entry_path.c_str(),
           mountpoint().c_str());

  // Paths are addressed by their MD5 hashes in the catalog table
  shash::Md5 path_hash((shash::AsciiPtr(entry_path)));
  shash::Md5 parent_hash((shash::AsciiPtr(parent_path)));
  // Work on a copy so the xattr flag can be adjusted without touching the
  // caller's entry
  DirectoryEntry effective_entry(entry);
  effective_entry.set_has_xattrs(!xattrs.IsEmpty());

  bool retval =
    sql_insert_->BindPathHash(path_hash) &&
    sql_insert_->BindParentPathHash(parent_hash) &&
    sql_insert_->BindDirent(effective_entry);
  assert(retval);
  if (xattrs.IsEmpty()) {
    retval = sql_insert_->BindXattrEmpty();
  } else {
    retval = sql_insert_->BindXattr(xattrs);
  }
  assert(retval);
  retval = sql_insert_->Execute();
  assert(retval);
  sql_insert_->Reset();

  // Keep the catalog statistics in sync with the new entry
  delta_counters_.Increment(effective_entry);
}
|
175 |
|||
176 |
|||
177 |
/**
 * Removes the specified entry from the catalog.
 * Note: removing a directory which is non-empty results in dangling entries.
 * (this should be treated in upper layers)
 * @param file_path the full path of the DirectoryEntry to delete
 */
void WritableCatalog::RemoveEntry(const string &file_path) {
  DirectoryEntry entry;
  bool retval = LookupPath(PathString(file_path), &entry);
  assert(retval);  // the entry to be removed must exist

  SetDirty();

  // If the entry used to be a chunked file... remove the chunks
  if (entry.IsChunkedFile()) {
    RemoveFileChunks(file_path);
  }

  // remove the entry itself
  shash::Md5 path_hash = shash::Md5(shash::AsciiPtr(file_path));
  retval =
    sql_unlink_->BindPathHash(path_hash) &&
    sql_unlink_->Execute();
  assert(retval);
  sql_unlink_->Reset();

  // Keep the catalog statistics in sync with the removed entry
  delta_counters_.Decrement(entry);
}
|
205 |
|||
206 |
|||
207 |
void WritableCatalog::IncLinkcount(const string &path_within_group, |
||
208 |
const int delta) |
||
209 |
{ |
||
210 |
SetDirty(); |
||
211 |
|||
212 |
shash::Md5 path_hash = shash::Md5(shash::AsciiPtr(path_within_group)); |
||
213 |
|||
214 |
bool retval = |
||
215 |
sql_inc_linkcount_->BindPathHash(path_hash) && |
||
216 |
sql_inc_linkcount_->BindDelta(delta) && |
||
217 |
sql_inc_linkcount_->Execute(); |
||
218 |
assert(retval); |
||
219 |
sql_inc_linkcount_->Reset(); |
||
220 |
} |
||
221 |
|||
222 |
|||
223 |
void WritableCatalog::TouchEntry(const DirectoryEntryBase &entry, |
||
224 |
const shash::Md5 &path_hash) { |
||
225 |
SetDirty(); |
||
226 |
|||
227 |
bool retval = |
||
228 |
sql_touch_->BindPathHash(path_hash) && |
||
229 |
sql_touch_->BindDirentBase(entry) && |
||
230 |
sql_touch_->Execute(); |
||
231 |
assert(retval); |
||
232 |
sql_touch_->Reset(); |
||
233 |
} |
||
234 |
|||
235 |
|||
236 |
50 |
void WritableCatalog::UpdateEntry(const DirectoryEntry &entry, |
|
237 |
const shash::Md5 &path_hash) { |
||
238 |
50 |
SetDirty(); |
|
239 |
|||
240 |
bool retval = |
||
241 |
sql_update_->BindPathHash(path_hash) && |
||
242 |
50 |
sql_update_->BindDirent(entry) && |
|
243 |
✓✗✓✗ ✓✗ |
100 |
sql_update_->Execute(); |
244 |
✗✓ | 50 |
assert(retval); |
245 |
50 |
sql_update_->Reset(); |
|
246 |
50 |
} |
|
247 |
|||
248 |
8 |
void WritableCatalog::AddFileChunk(const std::string &entry_path, |
|
249 |
const FileChunk &chunk) { |
||
250 |
8 |
SetDirty(); |
|
251 |
|||
252 |
8 |
shash::Md5 path_hash((shash::AsciiPtr(entry_path))); |
|
253 |
|||
254 |
LogCvmfs(kLogCatalog, kLogVerboseMsg, "adding chunk for %s from offset %d " |
||
255 |
"and chunk size: %d bytes", |
||
256 |
entry_path.c_str(), |
||
257 |
chunk.offset(), |
||
258 |
8 |
chunk.offset() + chunk.size()); |
|
259 |
|||
260 |
8 |
delta_counters_.self.file_chunks++; |
|
261 |
|||
262 |
bool retval = |
||
263 |
sql_chunk_insert_->BindPathHash(path_hash) && |
||
264 |
sql_chunk_insert_->BindFileChunk(chunk) && |
||
265 |
✓✗✓✗ ✓✗ |
8 |
sql_chunk_insert_->Execute(); |
266 |
✗✓ | 8 |
assert(retval); |
267 |
8 |
sql_chunk_insert_->Reset(); |
|
268 |
8 |
} |
|
269 |
|||
270 |
|||
271 |
/**
 * Removes the file chunks for a given file path
 * @param entry_path the file path to clear from its file chunks
 */
void WritableCatalog::RemoveFileChunks(const std::string &entry_path) {
  shash::Md5 path_hash((shash::AsciiPtr(entry_path)));
  bool retval;

  // subtract the number of chunks from the statistics counters
  retval =
    sql_chunks_count_->BindPathHash(path_hash) &&
    sql_chunks_count_->Execute();
  assert(retval);
  const int chunks_count = sql_chunks_count_->GetChunkCount();
  delta_counters_.self.file_chunks -= chunks_count;
  sql_chunks_count_->Reset();

  // remove the chunks associated to `entry_path`
  retval =
    sql_chunks_remove_->BindPathHash(path_hash) &&
    sql_chunks_remove_->Execute();
  assert(retval);
  sql_chunks_remove_->Reset();
}
||
295 |
|||
296 |
|||
297 |
/** |
||
298 |
* Sets the last modified time stamp of this catalog to current time. |
||
299 |
*/ |
||
300 |
95 |
void WritableCatalog::UpdateLastModified() { |
|
301 |
95 |
database().SetProperty("last_modified", static_cast<uint64_t>(time(NULL))); |
|
302 |
95 |
} |
|
303 |
|||
304 |
|||
305 |
/** |
||
306 |
* Increments the revision of the catalog in the database. |
||
307 |
*/ |
||
308 |
15 |
void WritableCatalog::IncrementRevision() { |
|
309 |
15 |
SetRevision(GetRevision() + 1); |
|
310 |
15 |
} |
|
311 |
|||
312 |
|||
313 |
15 |
// Persists the given revision number in the catalog's properties table.
void WritableCatalog::SetRevision(const uint64_t new_revision) {
  database().SetProperty("revision", new_revision);
}
|
316 |
|||
317 |
|||
318 |
// Persists the branch name in the catalog's properties table.
void WritableCatalog::SetBranch(const std::string &branch_name) {
  database().SetProperty("branch", branch_name);
}
||
321 |
|||
322 |
|||
323 |
// Persists the catalog's time-to-live value in the properties table.
void WritableCatalog::SetTTL(const uint64_t new_ttl) {
  database().SetProperty("TTL", new_ttl);
}
||
326 |
|||
327 |
|||
328 |
// Stores the VOMS authorization requirement; forwards the result of the
// underlying database call to the caller.
bool WritableCatalog::SetVOMSAuthz(const std::string &voms_authz) {
  return database().SetVOMSAuthz(voms_authz);
}
||
331 |
|||
332 |
|||
333 |
/** |
||
334 |
* Sets the content hash of the previous catalog revision. |
||
335 |
*/ |
||
336 |
15 |
void WritableCatalog::SetPreviousRevision(const shash::Any &hash) { |
|
337 |
15 |
database().SetProperty("previous_revision", hash.ToString()); |
|
338 |
15 |
} |
|
339 |
|||
340 |
|||
341 |
/**
 * Moves a subtree from this catalog into a just created nested catalog.
 * @param new_nested_catalog the freshly created (empty) nested catalog that
 *        receives the subtree rooted at its mountpoint
 */
void WritableCatalog::Partition(WritableCatalog *new_nested_catalog) {
  // Create connection between parent and child catalogs
  MakeTransitionPoint(new_nested_catalog->mountpoint().ToString());
  new_nested_catalog->MakeNestedRoot();
  delta_counters_.subtree.directories++;  // Root directory in nested catalog

  // Move the present directory tree into the newly created nested catalog
  // if we hit nested catalog mountpoints on the way, we return them through
  // the passed list
  vector<string> GrandChildMountpoints;
  MoveToNested(new_nested_catalog->mountpoint().ToString(), new_nested_catalog,
               &GrandChildMountpoints);

  // Nested catalog mountpoints found in the moved directory structure are now
  // links to nested catalogs of the newly created nested catalog.
  // Move these references into the new nested catalog
  MoveCatalogsToNested(GrandChildMountpoints, new_nested_catalog);
}
|
362 |
|||
363 |
|||
364 |
7 |
void WritableCatalog::MakeTransitionPoint(const string &mountpoint) { |
|
365 |
// Find the directory entry to edit |
||
366 |
7 |
DirectoryEntry transition_entry; |
|
367 |
bool retval = LookupPath(PathString(mountpoint.data(), mountpoint.length()), |
||
368 |
7 |
&transition_entry); |
|
369 |
✗✓ | 7 |
assert(retval); |
370 |
|||
371 |
assert(transition_entry.IsDirectory() && |
||
372 |
✓✗✗✓ |
7 |
!transition_entry.IsNestedCatalogRoot()); |
373 |
|||
374 |
7 |
transition_entry.set_is_nested_catalog_mountpoint(true); |
|
375 |
7 |
UpdateEntry(transition_entry, mountpoint); |
|
376 |
7 |
} |
|
377 |
|||
378 |
|||
379 |
7 |
void WritableCatalog::MakeNestedRoot() { |
|
380 |
7 |
DirectoryEntry root_entry; |
|
381 |
7 |
bool retval = LookupPath(mountpoint(), &root_entry); |
|
382 |
✗✓ | 7 |
assert(retval); |
383 |
|||
384 |
✓✗✗✓ |
7 |
assert(root_entry.IsDirectory() && !root_entry.IsNestedCatalogMountpoint()); |
385 |
|||
386 |
7 |
root_entry.set_is_nested_catalog_root(true); |
|
387 |
7 |
UpdateEntry(root_entry, mountpoint().ToString()); |
|
388 |
7 |
} |
|
389 |
|||
390 |
|||
391 |
16 |
/**
 * Recursively moves all entries below `directory` from this catalog into
 * `new_nested_catalog`.  Nested catalog mountpoints encountered on the way
 * are collected in `grand_child_mountpoints` for later re-linking.
 */
void WritableCatalog::MoveToNestedRecursively(
  const string directory,
  WritableCatalog *new_nested_catalog,
  vector<string> *grand_child_mountpoints)
{
  // After creating a new nested catalog we have to move all elements
  // now contained by the new one. List and move them recursively.
  DirectoryEntryList listing;
  const bool resolve_magic_symlinks = false;
  bool retval = ListingPath(PathString(directory), &listing,
                            resolve_magic_symlinks);
  assert(retval);

  // Go through the listing
  XattrList empty_xattrs;
  for (DirectoryEntryList::const_iterator i = listing.begin(),
       iEnd = listing.end(); i != iEnd; ++i)
  {
    const string full_path = i->GetFullPath(directory);

    // The entries are first inserted into the new catalog
    if (i->HasXattrs()) {
      // Carry the extended attributes over to the nested catalog
      XattrList xattrs;
      retval = LookupXattrsPath(PathString(full_path), &xattrs);
      assert(retval);
      assert(!xattrs.IsEmpty());
      new_nested_catalog->AddEntry(*i, xattrs, full_path);
    } else {
      new_nested_catalog->AddEntry(*i, empty_xattrs, full_path);
    }

    // Then we check if we have some special cases:
    if (i->IsNestedCatalogMountpoint()) {
      // Grand-child nested catalogs are re-linked later (MoveCatalogsToNested)
      grand_child_mountpoints->push_back(full_path);
    } else if (i->IsDirectory()) {
      // Recurse deeper into the directory tree
      MoveToNestedRecursively(full_path, new_nested_catalog,
                              grand_child_mountpoints);
    } else if (i->IsChunkedFile()) {
      MoveFileChunksToNested(full_path, i->hash_algorithm(),
                             new_nested_catalog);
    }

    // Remove the entry from the current catalog
    RemoveEntry(full_path);
  }
}
|
438 |
|||
439 |
|||
440 |
7 |
/**
 * Re-links the given nested catalog references from this catalog into
 * `new_nested_catalog`: each reference is removed here and inserted there,
 * preserving its hash, size and (if present) attached in-memory object.
 */
void WritableCatalog::MoveCatalogsToNested(
  const vector<string> &nested_catalogs,
  WritableCatalog *new_nested_catalog)
{
  for (vector<string>::const_iterator i = nested_catalogs.begin(),
       iEnd = nested_catalogs.end(); i != iEnd; ++i)
  {
    shash::Any hash_nested;
    uint64_t size_nested;
    bool retval = FindNested(PathString(*i), &hash_nested, &size_nested);
    assert(retval);  // the reference to move must exist in this catalog

    // Detach the reference here (keeps the attached child object, if any)
    Catalog *attached_reference = NULL;
    RemoveNestedCatalog(*i, &attached_reference);

    // ...and re-attach it under the new nested catalog
    new_nested_catalog->InsertNestedCatalog(*i, attached_reference,
                                            hash_nested, size_nested);
  }
}
|
459 |
|||
460 |
|||
461 |
void WritableCatalog::MoveFileChunksToNested( |
||
462 |
const std::string &full_path, |
||
463 |
const shash::Algorithms algorithm, |
||
464 |
WritableCatalog *new_nested_catalog) |
||
465 |
{ |
||
466 |
FileChunkList chunks; |
||
467 |
ListPathChunks(PathString(full_path), algorithm, &chunks); |
||
468 |
assert(chunks.size() > 0); |
||
469 |
|||
470 |
for (unsigned i = 0; i < chunks.size(); ++i) { |
||
471 |
new_nested_catalog->AddFileChunk(full_path, *chunks.AtPtr(i)); |
||
472 |
} |
||
473 |
} |
||
474 |
|||
475 |
|||
476 |
/**
 * Insert a nested catalog reference into this catalog.
 * The attached catalog object of this mountpoint can be specified (optional)
 * This way, the in-memory representation of the catalog tree is updated, too
 * @param mountpoint the path to the catalog to add a reference to
 * @param attached_reference can contain a reference to the attached catalog
 *        object of mountpoint
 * @param content_hash can be set to save a content hash together with the
 *        reference
 * @param size the uncompressed catalog database file size
 */
void WritableCatalog::InsertNestedCatalog(const string &mountpoint,
                                          Catalog *attached_reference,
                                          const shash::Any content_hash,
                                          const uint64_t size)
{
  // A null hash is stored as the empty string
  const string hash_string = (!content_hash.IsNull()) ?
                             content_hash.ToString() : "";

  SqlCatalog stmt(database(), "INSERT INTO nested_catalogs (path, sha1, size) "
                              "VALUES (:p, :sha1, :size);");
  bool retval =
    stmt.BindText(1, mountpoint) &&
    stmt.BindText(2, hash_string) &&
    stmt.BindInt64(3, size) &&
    stmt.Execute();
  assert(retval);

  // If a reference of the in-memory object of the newly referenced
  // catalog was passed, we add this to our own children
  if (attached_reference != NULL)
    AddChild(attached_reference);

  // The cached list of nested catalogs is now stale
  ResetNestedCatalogCacheUnprotected();

  delta_counters_.self.nested_catalogs++;
}
|
512 |
|||
513 |
|||
514 |
/** |
||
515 |
* Registeres a snapshot in /.cvmfs/snapshots. Note that bind mountpoints are |
||
516 |
* not universally handled: in Partition and MergeIntoParent, bind mountpoint |
||
517 |
* handling is missing! |
||
518 |
*/ |
||
519 |
void WritableCatalog::InsertBindMountpoint( |
||
520 |
const string &mountpoint, |
||
521 |
const shash::Any content_hash, |
||
522 |
const uint64_t size) |
||
523 |
{ |
||
524 |
SqlCatalog stmt(database(), |
||
525 |
"INSERT INTO bind_mountpoints (path, sha1, size) " |
||
526 |
"VALUES (:p, :sha1, :size);"); |
||
527 |
bool retval = |
||
528 |
stmt.BindText(1, mountpoint) && |
||
529 |
stmt.BindText(2, content_hash.ToString()) && |
||
530 |
stmt.BindInt64(3, size) && |
||
531 |
stmt.Execute(); |
||
532 |
assert(retval); |
||
533 |
} |
||
534 |
|||
535 |
|||
536 |
/**
 * Remove a nested catalog reference from the database.
 * If the catalog 'mountpoint' is currently attached as a child, it will be
 * removed, too (but not detached).
 * @param[in] mountpoint the mountpoint of the nested catalog to dereference in
 *            the database
 * @param[out] attached_reference is set to the object of the attached child or
 *             to NULL
 */
void WritableCatalog::RemoveNestedCatalog(const string &mountpoint,
                                          Catalog **attached_reference)
{
  // Sanity check: the reference to remove must exist
  shash::Any dummy;
  uint64_t dummy_size;
  bool retval = FindNested(PathString(mountpoint.data(), mountpoint.length()),
                           &dummy, &dummy_size);
  assert(retval);

  SqlCatalog stmt(database(),
                  "DELETE FROM nested_catalogs WHERE path = :p;");
  retval =
    stmt.BindText(1, mountpoint) &&
    stmt.Execute();
  assert(retval);

  // If the reference was successfully deleted, we also have to check whether
  // there is also an attached reference in our in-memory data.
  // In this case we remove the child and return it through **attached_reference
  Catalog *child = FindChild(PathString(mountpoint));
  if (child != NULL)
    RemoveChild(child);
  if (attached_reference != NULL)
    *attached_reference = child;

  // The cached list of nested catalogs is now stale
  ResetNestedCatalogCacheUnprotected();

  delta_counters_.self.nested_catalogs--;
}
||
574 |
|||
575 |
|||
576 |
/** |
||
577 |
* Unregisteres a snapshot from /.cvmfs/snapshots. Note that bind mountpoints |
||
578 |
* are not universally handled: in Partition and MergeIntoParent, bind |
||
579 |
* mountpoint handling is missing! |
||
580 |
*/ |
||
581 |
void WritableCatalog::RemoveBindMountpoint(const std::string &mountpoint) { |
||
582 |
shash::Any dummy; |
||
583 |
uint64_t dummy_size; |
||
584 |
bool retval = FindNested(PathString(mountpoint.data(), mountpoint.length()), |
||
585 |
&dummy, &dummy_size); |
||
586 |
assert(retval); |
||
587 |
|||
588 |
SqlCatalog stmt(database(), |
||
589 |
"DELETE FROM bind_mountpoints WHERE path = :p;"); |
||
590 |
retval = |
||
591 |
stmt.BindText(1, mountpoint) && |
||
592 |
stmt.Execute(); |
||
593 |
assert(retval); |
||
594 |
} |
||
595 |
|||
596 |
|||
597 |
/**
 * Updates the link to a nested catalog in the database.
 * @param path the path of the nested catalog to update
 * @param hash the hash to set the given nested catalog link to
 * @param size the uncompressed catalog database file size
 * @param child_counters the statistics counters of the nested catalog
 */
void WritableCatalog::UpdateNestedCatalog(const std::string &path,
                                          const shash::Any &hash,
                                          const uint64_t size,
                                          const DeltaCounters &child_counters) {
  // Serialize against concurrent access to this catalog object
  MutexLockGuard guard(lock_);

  // Fold the child's statistics into this catalog's delta counters
  child_counters.PopulateToParent(&delta_counters_);

  const string hash_str = hash.ToString();
  const string sql = "UPDATE nested_catalogs SET sha1 = :sha1, size = :size "
                     "WHERE path = :path;";
  SqlCatalog stmt(database(), sql);

  bool retval =
    stmt.BindText(1, hash_str) &&
    stmt.BindInt64(2, size) &&
    stmt.BindText(3, path) &&
    stmt.Execute();

  // The cached list of nested catalogs is now stale
  ResetNestedCatalogCacheUnprotected();

  assert(retval);
}
|
627 |
|||
628 |
|||
629 |
/**
 * Merges this (nested) catalog's entries, nested catalog references and
 * statistics counters into its parent catalog.  Afterwards the parent's
 * reference to this catalog is removed, leaving this object dangling.
 */
void WritableCatalog::MergeIntoParent() {
  assert(!IsRoot() && HasParent());
  WritableCatalog *parent = GetWritableParent();

  // Copy all directory entries (and chunks) into the parent database
  CopyToParent();

  // Copy the nested catalog references
  CopyCatalogsToParent();

  // Fix counters in parent
  delta_counters_.PopulateToParent(&parent->delta_counters_);
  Counters &counters = GetWritableCounters();
  counters.ApplyDelta(delta_counters_);
  counters.MergeIntoParent(&parent->delta_counters_);

  // Remove the nested catalog reference for this nested catalog.
  // From now on this catalog will be dangling!
  parent->RemoveNestedCatalog(this->mountpoint().ToString(), NULL);
}
||
648 |
|||
649 |
|||
650 |
/**
 * Detaches this (nested) catalog from its parent without merging its
 * contents: the parent's reference is dropped and the parent's subtree
 * counters are decremented by this catalog's own statistics.
 */
void WritableCatalog::RemoveFromParent() {
  assert(!IsRoot() && HasParent());
  WritableCatalog *parent = GetWritableParent();

  // Remove the nested catalog reference for this nested catalog.
  // From now on this catalog will be dangling!
  Catalog* child_catalog;
  parent->RemoveNestedCatalog(this->mountpoint().ToString(), &child_catalog);

  const Counters& child_counters = child_catalog->GetCounters();

  // Undo this catalog's contribution to the parent's subtree statistics
  parent->delta_counters_.subtree.directories -= 1;
  parent->delta_counters_.subtree.file_size -= child_counters.self.file_size;
  parent->delta_counters_.subtree.regular_files -=
    child_counters.self.regular_files;
  parent->delta_counters_.subtree.symlinks -= child_counters.self.symlinks;
}
||
667 |
|||
668 |
|||
669 |
/**
 * Transfers all of this catalog's own nested catalog references into its
 * parent, keeping the in-memory child objects attached where present.
 * Used while merging a nested catalog into its parent.
 */
void WritableCatalog::CopyCatalogsToParent() {
  WritableCatalog *parent = GetWritableParent();

  // Obtain a list of all nested catalog references
  const NestedCatalogList nested_catalog_references = ListOwnNestedCatalogs();

  // Go through the list and update the databases
  // simultaneously we are checking if the referenced catalogs are currently
  // attached and update the in-memory data structures as well
  for (NestedCatalogList::const_iterator i = nested_catalog_references.begin(),
       iEnd = nested_catalog_references.end(); i != iEnd; ++i)
  {
    Catalog *child = FindChild(i->mountpoint);
    parent->InsertNestedCatalog(
      i->mountpoint.ToString(), child, i->hash, i->size);
    parent->delta_counters_.self.nested_catalogs--;  // Will be fixed later
  }
}
||
687 |
|||
688 |
/**
 * Copies all directory entries and chunk rows of this nested catalog into
 * the parent's database via SQLite ATTACH, after resolving hardlink group
 * ID collisions and removing the duplicated root/mountpoint entry.
 */
void WritableCatalog::CopyToParent() {
  // We could simply copy all entries from this database to the 'other' database
  // BUT: 1. this would create collisions in hardlink group IDs.
  //         therefore we first update all hardlink group IDs to fit behind the
  //         ones in the 'other' database
  //      2. the root entry of the nested catalog is present twice:
  //         1. in the parent directory (as mount point) and
  //         2. in the nested catalog (as root entry)
  //         therefore we delete the mount point from the parent before merging

  WritableCatalog *parent = GetWritableParent();

  // Update hardlink group IDs in this nested catalog.
  // To avoid collisions we add the maximal present hardlink group ID in parent
  // to all hardlink group IDs in the nested catalog.
  const uint64_t offset = static_cast<uint64_t>(parent->GetMaxLinkId()) << 32;
  const string update_link_ids =
    "UPDATE catalog SET hardlinks = hardlinks + " + StringifyInt(offset) +
    " WHERE hardlinks > (1 << 32);";

  SqlCatalog sql_update_link_ids(database(), update_link_ids);
  bool retval = sql_update_link_ids.Execute();
  assert(retval);

  // Remove the nested catalog root.
  // It is already present in the parent.
  RemoveEntry(this->mountpoint().ToString());

  // Now copy all DirectoryEntries to the 'other' catalog.
  // There will be no data collisions, as we resolved them beforehand
  // Both databases must be committed before ATTACH can see consistent data.
  if (dirty_)
    Commit();
  if (parent->dirty_)
    parent->Commit();
  SqlCatalog sql_attach(database(), "ATTACH '" + parent->database_path() + "' "
                                    "AS other;");
  retval = sql_attach.Execute();
  assert(retval);
  retval = SqlCatalog(database(), "INSERT INTO other.catalog "
                                  "SELECT * FROM main.catalog;").Execute();
  assert(retval);
  retval = SqlCatalog(database(), "INSERT INTO other.chunks "
                                  "SELECT * FROM main.chunks;").Execute();
  assert(retval);
  retval = SqlCatalog(database(), "DETACH other;").Execute();
  assert(retval);
  parent->SetDirty();

  // Change the just copied nested catalog root to an ordinary directory
  // (the nested catalog is merged into its parent)
  DirectoryEntry old_root_entry;
  retval = parent->LookupPath(this->mountpoint(), &old_root_entry);
  assert(retval);

  assert(old_root_entry.IsDirectory() &&
         old_root_entry.IsNestedCatalogMountpoint() &&
         !old_root_entry.IsNestedCatalogRoot());

  // Remove the nested catalog root mark
  old_root_entry.set_is_nested_catalog_mountpoint(false);
  parent->UpdateEntry(old_root_entry, this->mountpoint().ToString());
}
||
750 |
|||
751 |
|||
752 |
/** |
||
753 |
* Writes delta_counters_ to the database. |
||
754 |
*/ |
||
755 |
22 |
void WritableCatalog::UpdateCounters() { |
|
756 |
const bool retval = delta_counters_.WriteToDatabase(database()) && |
||
757 |
✓✗✓✗ |
22 |
ReadCatalogCounters(); |
758 |
✗✓ | 22 |
assert(retval); |
759 |
22 |
} |
|
760 |
|||
761 |
|||
762 |
/** |
||
763 |
* Checks if the database of this catalogs needs cleanup and defragments it |
||
764 |
* if necessary |
||
765 |
*/ |
||
766 |
15 |
void WritableCatalog::VacuumDatabaseIfNecessary() { |
|
767 |
15 |
const CatalogDatabase &db = database(); |
|
768 |
15 |
bool needs_defragmentation = false; |
|
769 |
15 |
double ratio = 0.0; |
|
770 |
15 |
std::string reason; |
|
771 |
|||
772 |
✗✓ | 15 |
if ((ratio = db.GetFreePageRatio()) > kMaximalFreePageRatio) { |
773 |
needs_defragmentation = true; |
||
774 |
reason = "free pages"; |
||
775 |
✓✓ | 15 |
} else if ((ratio = db.GetRowIdWasteRatio()) > kMaximalRowIdWasteRatio) { |
776 |
2 |
needs_defragmentation = true; |
|
777 |
2 |
reason = "wasted row IDs"; |
|
778 |
} |
||
779 |
|||
780 |
✓✓ | 15 |
if (needs_defragmentation) { |
781 |
LogCvmfs(kLogCatalog, kLogStdout | kLogNoLinebreak, |
||
782 |
"Note: Catalog at %s gets defragmented (%.2f%% %s)... ", |
||
783 |
(IsRoot()) ? "/" : mountpoint().c_str(), |
||
784 |
ratio * 100.0, |
||
785 |
✓✓✗✗ ✓✓ |
2 |
reason.c_str()); |
786 |
✗✓ | 2 |
if (!db.Vacuum()) { |
787 |
LogCvmfs(kLogCatalog, kLogStderr, "failed (SQLite: %s)", |
||
788 |
db.GetLastErrorMsg().c_str()); |
||
789 |
assert(false); |
||
790 |
} |
||
791 |
2 |
LogCvmfs(kLogCatalog, kLogStdout, "done"); |
|
792 |
} |
||
793 |
15 |
} |
|
794 |
|||
795 |
} // namespace catalog |
Generated by: GCOVR (Version 4.1) |