GCC Code Coverage Report | |||||||||||||||||||||
|
|||||||||||||||||||||
Line | Branch | Exec | Source |
1 |
/** |
||
2 |
* This file is part of the CernVM file system. |
||
3 |
*/ |
||
4 |
|||
5 |
#include "cvmfs_config.h"
#include "catalog_sql.h"

#include <cstdlib>
#include <cstring>

#include <string>

#include "catalog.h"
#include "globals.h"
#include "logging.h"
#include "util/posix.h"
#include "xattr.h"
||
16 |
|||
17 |
using namespace std; // NOLINT |
||
18 |
|||
19 |
namespace catalog { |
||
20 |
|||
21 |
/** |
||
22 |
* NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE |
||
23 |
* Always remember to update the legacy catalog migration classes to produce a |
||
24 |
* compatible catalog structure when updating the schema revisions here! |
||
25 |
* |
||
26 |
* Repository rollbacks to an outdated catalog schema is not supported. Have a |
||
27 |
* look into CVM-252 if that becomes necessary at some point. |
||
28 |
* NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE |
||
29 |
*/ |
||
30 |
|||
31 |
// ChangeLog |
||
32 |
// 2.5 (Jun 26 2013 - Git: e79baec22c6abd6ddcdf8f8d7d33921027a052ab) |
||
33 |
// * add (backward compatible) schema revision - see below |
||
34 |
// * add statistics counters for chunked files |
||
35 |
// Note: this was retrofitted and needed a catalog migration step |
||
36 |
// |
||
37 |
// 2.4 (Aug 15 2012 - Git: 17de8fc782b5b8dc4404dda925627b5ec2b552e1) |
||
38 |
// 2.3 (Aug 15 2012 - Git: ab77688cdb2f851af3fe983bf3694dc2465e65be) |
||
39 |
// 2.2 (never existed) |
||
40 |
// 2.1 (Aug 7 2012 - Git: beba36c12d2b1123ffbb169f865a861e570adc68) |
||
41 |
// * add 'chunks' table for file chunks |
||
42 |
// * add 'statistics' table for accumulative counters |
||
43 |
// * rename 'inode' field to 'hardlinks' |
||
44 |
// * containing both hardlink group ID and linkcount |
||
45 |
// * .cvmfscatalog files become first-class entries in the catalogs |
||
46 |
// |
||
47 |
// 2.0 (Aug 6 2012 - Git: c8a81ede603e57fbe4324b6ab6bc8c41e3a2fa5f) |
||
48 |
// * beginning of CernVM-FS 2.1.x branch ('modern' era) |
||
49 |
// |
||
50 |
// 1.x (earlier - based on SVN :-) ) |
||
51 |
// * pre-historic times |
||
52 |
const float CatalogDatabase::kLatestSchema = 2.5; |
||
53 |
const float CatalogDatabase::kLatestSupportedSchema = 2.5; // + 1.X (r/o) |
||
54 |
|||
55 |
// ChangeLog |
||
56 |
// 0 --> 1: (Jan 6 2014 - Git: 3667fe7a669d0d65e07275b753a7c6f23fc267df) |
||
57 |
// * add size column to nested catalog table, |
||
58 |
// * add schema_revision property |
||
59 |
// 1 --> 2: (Jan 22 2014 - Git: 85e6680e52cfe56dc1213a5ad74a5cc62fd50ead): |
||
60 |
// * add xattr column to catalog table |
||
61 |
// * add self_xattr and subtree_xattr statistics counters |
||
62 |
// 2 --> 3: (Sep 28 2015 - Git: f4171234b13ea448589820c1524ee52eae141bb4): |
||
63 |
// * add kFlagFileExternal to entries in catalog table |
||
64 |
// * add self_external and subtree_external statistics counters |
||
65 |
// * store compression algorithm in flags |
||
66 |
// 3 --> 4: (Nov 11 2016 - Git): |
||
67 |
// * add kFlagDirBindMountpoint |
||
68 |
// * add kFlagHidden |
||
69 |
// * add table bind_mountpoints |
||
70 |
// 4 --> 5: (Dec 07 2017): |
||
71 |
// * add kFlagFileSpecial (rebranded unused kFlagFileStat) |
||
72 |
// * add self_special and subtree_special statistics counters |
||
73 |
const unsigned CatalogDatabase::kLatestSchemaRevision = 5; |
||
74 |
|||
75 |
101 |
bool CatalogDatabase::CheckSchemaCompatibility() { |
|
76 |
return !( (schema_version() >= 2.0-kSchemaEpsilon) && |
||
77 |
(!IsEqualSchema(schema_version(), kLatestSupportedSchema)) && |
||
78 |
(!IsEqualSchema(schema_version(), 2.4) || |
||
79 |
✓✗✗✓ ✗✗✗✗ |
101 |
!IsEqualSchema(kLatestSupportedSchema, 2.5)) ); |
80 |
} |
||
81 |
|||
82 |
|||
83 |
42 |
/**
 * Upgrades an opened read-write catalog in place, one schema revision at a
 * time (0 -> 1 -> 2 -> 3 -> 4 -> 5).  Each step only fires for schema
 * version 2.5 and exactly the expected starting revision, so a catalog
 * several revisions behind is migrated through all steps in one call.
 *
 * @return true if the catalog is (now) at the latest revision, false if any
 *         SQL statement or the revision bookkeeping failed.
 */
bool CatalogDatabase::LiveSchemaUpgradeIfNecessary() {
  assert(read_write());

  // Revision 0 -> 1: nested catalog size tracking
  if (IsEqualSchema(schema_version(), 2.5) && (schema_revision() == 0)) {
    LogCvmfs(kLogCatalog, kLogDebug, "upgrading schema revision (0 --> 1)");

    SqlCatalog sql_upgrade(*this, "ALTER TABLE nested_catalogs "
                                  "ADD size INTEGER;");
    if (!sql_upgrade.Execute()) {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade nested_catalogs");
      return false;
    }

    set_schema_revision(1);
    if (!StoreSchemaRevision()) {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade schema revision");
      return false;
    }
  }

  // Revision 1 -> 2: extended attribute storage + counters
  if (IsEqualSchema(schema_version(), 2.5) && (schema_revision() == 1)) {
    LogCvmfs(kLogCatalog, kLogDebug, "upgrading schema revision (1 --> 2)");

    SqlCatalog sql_upgrade1(*this, "ALTER TABLE catalog ADD xattr BLOB;");
    SqlCatalog sql_upgrade2(*this,
      "INSERT INTO statistics (counter, value) VALUES ('self_xattr', 0);");
    SqlCatalog sql_upgrade3(*this,
      "INSERT INTO statistics (counter, value) VALUES ('subtree_xattr', 0);");
    if (!sql_upgrade1.Execute() || !sql_upgrade2.Execute() ||
        !sql_upgrade3.Execute())
    {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade catalogs (1 --> 2)");
      return false;
    }

    set_schema_revision(2);
    if (!StoreSchemaRevision()) {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade schema revision");
      return false;
    }
  }

  // Revision 2 -> 3: external file support counters
  if (IsEqualSchema(schema_version(), 2.5) && (schema_revision() == 2)) {
    LogCvmfs(kLogCatalog, kLogDebug, "upgrading schema revision (2 --> 3)");

    SqlCatalog sql_upgrade4(*this,
      "INSERT INTO statistics (counter, value) VALUES "
      "('self_external', 0);");
    SqlCatalog sql_upgrade5(*this,
      "INSERT INTO statistics (counter, value) VALUES "
      "('self_external_file_size', 0);");
    SqlCatalog sql_upgrade6(*this,
      "INSERT INTO statistics (counter, value) VALUES "
      "('subtree_external', 0);");
    SqlCatalog sql_upgrade7(*this,
      "INSERT INTO statistics (counter, value) VALUES "
      "('subtree_external_file_size', 0);");
    if (!sql_upgrade4.Execute() || !sql_upgrade5.Execute() ||
        !sql_upgrade6.Execute() || !sql_upgrade7.Execute())
    {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade catalogs (2 --> 3)");
      return false;
    }

    set_schema_revision(3);
    if (!StoreSchemaRevision()) {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade schema revision");
      return false;
    }
  }

  // Revision 3 -> 4: bind mountpoint table (kept separate from
  // nested_catalogs, see CreateEmptyDatabase for the rationale)
  if (IsEqualSchema(schema_version(), 2.5) && (schema_revision() == 3)) {
    LogCvmfs(kLogCatalog, kLogDebug, "upgrading schema revision (3 --> 4)");

    SqlCatalog sql_upgrade8(*this,
      "CREATE TABLE bind_mountpoints (path TEXT, sha1 TEXT, size INTEGER, "
      "CONSTRAINT pk_bind_mountpoints PRIMARY KEY (path));");
    if (!sql_upgrade8.Execute()) {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade catalogs (3 --> 4)");
      return false;
    }

    set_schema_revision(4);
    if (!StoreSchemaRevision()) {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade schema revision");
      return false;
    }
  }

  // Revision 4 -> 5: special file (device node, fifo, ...) counters
  if (IsEqualSchema(schema_version(), 2.5) && (schema_revision() == 4)) {
    LogCvmfs(kLogCatalog, kLogDebug, "upgrading schema revision (4 --> 5)");

    SqlCatalog sql_upgrade9(*this,
      "INSERT INTO statistics (counter, value) VALUES "
      "('self_special', 0);");
    SqlCatalog sql_upgrade10(*this,
      "INSERT INTO statistics (counter, value) VALUES "
      "('subtree_special', 0);");
    if (!sql_upgrade9.Execute() || !sql_upgrade10.Execute()) {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade catalogs (4 --> 5)");
      return false;
    }

    set_schema_revision(5);
    if (!StoreSchemaRevision()) {
      LogCvmfs(kLogCatalog, kLogDebug, "failed to upgrade schema revision");
      return false;
    }
  }

  return true;
}
||
196 |
|||
197 |
|||
198 |
64 |
bool CatalogDatabase::CreateEmptyDatabase() { |
|
199 |
✗✓ | 64 |
assert(read_write()); |
200 |
|||
201 |
// generate the catalog table and index structure |
||
202 |
const bool retval = |
||
203 |
SqlCatalog(*this, |
||
204 |
"CREATE TABLE catalog " |
||
205 |
"(md5path_1 INTEGER, md5path_2 INTEGER, parent_1 INTEGER, parent_2 INTEGER," |
||
206 |
" hardlinks INTEGER, hash BLOB, size INTEGER, mode INTEGER, mtime INTEGER," |
||
207 |
" flags INTEGER, name TEXT, symlink TEXT, uid INTEGER, gid INTEGER, " |
||
208 |
" xattr BLOB, " |
||
209 |
" CONSTRAINT pk_catalog PRIMARY KEY (md5path_1, md5path_2));").Execute() && |
||
210 |
SqlCatalog(*this, |
||
211 |
"CREATE INDEX idx_catalog_parent " |
||
212 |
"ON catalog (parent_1, parent_2);") .Execute() && |
||
213 |
SqlCatalog(*this, |
||
214 |
"CREATE TABLE chunks " |
||
215 |
"(md5path_1 INTEGER, md5path_2 INTEGER, offset INTEGER, size INTEGER, " |
||
216 |
" hash BLOB, " |
||
217 |
" CONSTRAINT pk_chunks PRIMARY KEY (md5path_1, md5path_2, offset, size), " |
||
218 |
" FOREIGN KEY (md5path_1, md5path_2) REFERENCES " |
||
219 |
" catalog(md5path_1, md5path_2));") .Execute() && |
||
220 |
SqlCatalog(*this, |
||
221 |
"CREATE TABLE nested_catalogs (path TEXT, sha1 TEXT, size INTEGER, " |
||
222 |
"CONSTRAINT pk_nested_catalogs PRIMARY KEY (path));") .Execute() && |
||
223 |
// Bind mountpoints and nested catalogs are almost the same. We put them in |
||
224 |
// separate tables to |
||
225 |
// - not confuse previous client versions, which would crash on bind |
||
226 |
// mountpoints |
||
227 |
// - prevent catalogs referenced as bind mountpoints from being replicated, |
||
228 |
// which would cause exhaustive recursive catalog tree walking |
||
229 |
// - don't walk into bind mountpoints in catalog traversal (e.g. GC) |
||
230 |
SqlCatalog(*this, |
||
231 |
"CREATE TABLE bind_mountpoints (path TEXT, sha1 TEXT, size INTEGER, " |
||
232 |
"CONSTRAINT pk_bind_mountpoints PRIMARY KEY (path));") .Execute() && |
||
233 |
SqlCatalog(*this, |
||
234 |
"CREATE TABLE statistics (counter TEXT, value INTEGER, " |
||
235 |
✓✗✓✗ ✓✗✓✗ ✓✗✓✗ ✓✗✗✗ ✗✗✗✗ ✗✗✓✗ ✗✗✗✗ ✓✗✗✗ ✗✓✗✗ ✗✗✗✗ ✓✗✗✗ ✗✗✓✗ ✗✗✗✓ ✗✗✗✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ |
64 |
"CONSTRAINT pk_statistics PRIMARY KEY (counter));") .Execute(); |
236 |
|||
237 |
✗✓ | 64 |
if (!retval) { |
238 |
PrintSqlError("failed to create catalog database tables."); |
||
239 |
} |
||
240 |
|||
241 |
64 |
return retval; |
|
242 |
} |
||
243 |
|||
244 |
|||
245 |
63 |
bool CatalogDatabase::InsertInitialValues( |
|
246 |
const std::string &root_path, |
||
247 |
const bool volatile_content, |
||
248 |
const std::string &voms_authz, |
||
249 |
const DirectoryEntry &root_entry) |
||
250 |
{ |
||
251 |
✗✓ | 63 |
assert(read_write()); |
252 |
63 |
bool retval = false; |
|
253 |
|||
254 |
// Path hashes |
||
255 |
63 |
shash::Md5 root_path_hash = shash::Md5(shash::AsciiPtr(root_path)); |
|
256 |
shash::Md5 root_parent_hash = (root_path == "") |
||
257 |
? shash::Md5() |
||
258 |
✓✓✗✗ ✓✓ |
63 |
: shash::Md5(shash::AsciiPtr(GetParentPath(root_path))); |
259 |
|||
260 |
// Start initial filling transaction |
||
261 |
63 |
retval = BeginTransaction(); |
|
262 |
✗✓ | 63 |
if (!retval) { |
263 |
PrintSqlError("failed to enter initial filling transaction"); |
||
264 |
return false; |
||
265 |
} |
||
266 |
|||
267 |
// Insert initial values to properties |
||
268 |
✓✗✗✗ |
63 |
if (!this->SetProperty("revision", 0)) { |
269 |
PrintSqlError( |
||
270 |
"failed to insert default initial values into the newly created " |
||
271 |
"catalog tables."); |
||
272 |
return false; |
||
273 |
} |
||
274 |
|||
275 |
✗✓ | 63 |
if (volatile_content) { |
276 |
if (!this->SetProperty("volatile", 1)) { |
||
277 |
PrintSqlError("failed to insert volatile flag into the newly created " |
||
278 |
"catalog tables."); |
||
279 |
return false; |
||
280 |
} |
||
281 |
} |
||
282 |
|||
283 |
✗✓ | 63 |
if (!voms_authz.empty()) { |
284 |
if (!SetVOMSAuthz(voms_authz)) { |
||
285 |
PrintSqlError("failed to insert VOMS authz flag into the newly created " |
||
286 |
"catalog tables."); |
||
287 |
return false; |
||
288 |
} |
||
289 |
} |
||
290 |
|||
291 |
// Create initial statistics counters |
||
292 |
63 |
catalog::Counters counters; |
|
293 |
|||
294 |
// Insert root entry (when given) |
||
295 |
✓✓ | 63 |
if (!root_entry.IsNegative()) { |
296 |
47 |
SqlDirentInsert sql_insert(*this); |
|
297 |
retval = sql_insert.BindPathHash(root_path_hash) && |
||
298 |
sql_insert.BindParentPathHash(root_parent_hash) && |
||
299 |
sql_insert.BindDirent(root_entry) && |
||
300 |
✓✗✓✗ ✓✗✓✗ |
47 |
sql_insert.Execute(); |
301 |
✗✓ | 47 |
if (!retval) { |
302 |
PrintSqlError("failed to insert root entry into newly created catalog."); |
||
303 |
return false; |
||
304 |
} |
||
305 |
|||
306 |
// account for the created root entry |
||
307 |
✗✓✗ | 47 |
counters.self.directories = 1; |
308 |
} |
||
309 |
|||
310 |
// Save initial statistics counters |
||
311 |
✗✓ | 63 |
if (!counters.InsertIntoDatabase(*this)) { |
312 |
PrintSqlError("failed to insert initial catalog statistics counters."); |
||
313 |
return false; |
||
314 |
} |
||
315 |
|||
316 |
// Insert root path (when given) |
||
317 |
✓✓ | 63 |
if (!root_path.empty()) { |
318 |
✗✗✓✗ |
15 |
if (!this->SetProperty("root_prefix", root_path)) { |
319 |
PrintSqlError( |
||
320 |
"failed to store root prefix in the newly created catalog."); |
||
321 |
return false; |
||
322 |
} |
||
323 |
} |
||
324 |
|||
325 |
// Set creation timestamp |
||
326 |
✗✓✗✗ |
63 |
if (!this->SetProperty("last_modified", static_cast<uint64_t>(time(NULL)))) { |
327 |
PrintSqlError("failed to store creation timestamp in the new catalog."); |
||
328 |
return false; |
||
329 |
} |
||
330 |
|||
331 |
// Commit initial filling transaction |
||
332 |
63 |
retval = CommitTransaction(); |
|
333 |
✗✓ | 63 |
if (!retval) { |
334 |
PrintSqlError("failed to commit initial filling transaction"); |
||
335 |
return false; |
||
336 |
} |
||
337 |
|||
338 |
63 |
return true; |
|
339 |
} |
||
340 |
|||
341 |
|||
342 |
/**
 * Stores the VOMS authorization string in the catalog's properties table.
 */
bool
CatalogDatabase::SetVOMSAuthz(const std::string &voms_authz) {
  return this->SetProperty("voms_authz", voms_authz);
}
||
346 |
|||
347 |
|||
348 |
15 |
double CatalogDatabase::GetRowIdWasteRatio() const { |
|
349 |
SqlCatalog rowid_waste_ratio_query(*this, |
||
350 |
"SELECT 1.0 - CAST(COUNT(*) AS DOUBLE) / MAX(rowid) " |
||
351 |
15 |
"AS ratio FROM catalog;"); |
|
352 |
15 |
const bool retval = rowid_waste_ratio_query.FetchRow(); |
|
353 |
✗✓ | 15 |
assert(retval); |
354 |
|||
355 |
15 |
return rowid_waste_ratio_query.RetrieveDouble(0); |
|
356 |
} |
||
357 |
|||
358 |
/** |
||
359 |
* Cleanup unused database space |
||
360 |
* |
||
361 |
* This copies the entire catalog content into a temporary SQLite table, sweeps |
||
362 |
* the original data from the 'catalog' table and reinserts everything from the |
||
363 |
* temporary table afterwards. That way the implicit rowid field of 'catalog' is |
||
364 |
* defragmented. |
||
365 |
* |
||
366 |
* Since the 'chunks' table has a foreign key relationship to the 'catalog' we |
||
367 |
* need to temporarily switch off the foreign key checks. Otherwise the clearing |
||
368 |
* of the 'catalog' table would fail due to foreign key violations. Note that it |
||
369 |
* is a NOOP to change the foreign key setting during a transaction! |
||
370 |
* |
||
371 |
* Note: VACUUM used to have a similar behaviour but it was dropped from SQLite |
||
372 |
* at some point. Since we compute client-inodes from the rowIDs, we are |
||
373 |
* probably one of the few use cases where a defragmented rowID is indeed |
||
374 |
* beneficial. |
||
375 |
* |
||
376 |
* See: http://www.sqlite.org/lang_vacuum.html |
||
377 |
*/ |
||
378 |
2 |
bool CatalogDatabase::CompactDatabase() const { |
|
379 |
✗✓ | 2 |
assert(read_write()); |
380 |
|||
381 |
return SqlCatalog(*this, "PRAGMA foreign_keys = OFF;").Execute() && |
||
382 |
BeginTransaction() && |
||
383 |
SqlCatalog(*this, "CREATE TEMPORARY TABLE duplicate AS " |
||
384 |
" SELECT * FROM catalog " |
||
385 |
" ORDER BY rowid ASC;").Execute() && |
||
386 |
SqlCatalog(*this, "DELETE FROM catalog;").Execute() && |
||
387 |
SqlCatalog(*this, "INSERT INTO catalog " |
||
388 |
" SELECT * FROM duplicate " |
||
389 |
" ORDER BY rowid").Execute() && |
||
390 |
SqlCatalog(*this, "DROP TABLE duplicate;").Execute() && |
||
391 |
CommitTransaction() && |
||
392 |
✓✗✓✗ ✓✗✓✗ ✓✗✓✗ ✓✗✓✗ ✓✗✗✗ ✗✗✗✗ ✗✗✓✗ ✗✗✗✗ ✓✗✗✗ ✗✓✗✗ ✗✗✗✗ ✓✗✗✗ ✗✗✓✗ ✗✗✗✓ ✗✗✗✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ ✗✗✓✗ |
2 |
SqlCatalog(*this, "PRAGMA foreign_keys = ON;").Execute(); |
393 |
} |
||
394 |
|||
395 |
|||
396 |
//------------------------------------------------------------------------------ |
||
397 |
|||
398 |
|||
399 |
273 |
unsigned SqlDirent::CreateDatabaseFlags(const DirectoryEntry &entry) const { |
|
400 |
273 |
unsigned int database_flags = 0; |
|
401 |
|||
402 |
✓✓ | 273 |
if (entry.IsNestedCatalogRoot()) |
403 |
7 |
database_flags |= kFlagDirNestedRoot; |
|
404 |
✓✓ | 266 |
else if (entry.IsNestedCatalogMountpoint()) |
405 |
7 |
database_flags |= kFlagDirNestedMountpoint; |
|
406 |
✗✓ | 259 |
else if (entry.IsBindMountpoint()) |
407 |
database_flags |= kFlagDirBindMountpoint; |
||
408 |
|||
409 |
✓✓ | 273 |
if (entry.IsDirectory()) { |
410 |
153 |
database_flags |= kFlagDir; |
|
411 |
✓✓ | 120 |
} else if (entry.IsLink()) { |
412 |
9 |
database_flags |= kFlagFile | kFlagLink; |
|
413 |
✗✓ | 111 |
} else if (entry.IsSpecial()) { |
414 |
database_flags |= kFlagFile | kFlagFileSpecial; |
||
415 |
} else { |
||
416 |
111 |
database_flags |= kFlagFile; |
|
417 |
111 |
database_flags |= entry.compression_algorithm() << kFlagPosCompression; |
|
418 |
✓✓ | 111 |
if (entry.IsChunkedFile()) |
419 |
8 |
database_flags |= kFlagFileChunk; |
|
420 |
✗✓ | 111 |
if (entry.IsExternalFile()) |
421 |
database_flags |= kFlagFileExternal; |
||
422 |
} |
||
423 |
|||
424 |
✓✓✗✓ ✓✓ |
273 |
if (!entry.checksum_ptr()->IsNull() || entry.IsChunkedFile()) |
425 |
91 |
StoreHashAlgorithm(entry.checksum_ptr()->algorithm, &database_flags); |
|
426 |
|||
427 |
✓✓ | 273 |
if (entry.IsHidden()) |
428 |
8 |
database_flags |= kFlagHidden; |
|
429 |
|||
430 |
273 |
return database_flags; |
|
431 |
} |
||
432 |
|||
433 |
|||
434 |
91 |
/**
 * Packs the content hash algorithm id into the flags bit field.  The stored
 * value is (algo - 1) because Md5 (id 0) is unusable for content hashes and
 * is skipped.
 */
void SqlDirent::StoreHashAlgorithm(const shash::Algorithms algo,
                                   unsigned *flags) const
{
  // Md5 unusable for content hashes
  assert(algo != shash::kAny);
  const unsigned algo_bits = static_cast<unsigned>(algo) - 1;
  *flags |= algo_bits << kFlagPosHash;
}
|
441 |
|||
442 |
|||
443 |
207 |
/**
 * Inverse of StoreHashAlgorithm(): extracts the 3-bit hash algorithm field
 * from the flags column and re-adds the offset that skips Md5.
 */
shash::Algorithms SqlDirent::RetrieveHashAlgorithm(const unsigned flags) const {
  // 3-bit field at kFlagPosHash; +1 skips Md5
  const unsigned algo_id = ((flags >> kFlagPosHash) & 7) + 1;
  assert(algo_id < shash::kAny);
  return static_cast<shash::Algorithms>(algo_id);
}
||
450 |
|||
451 |
|||
452 |
199 |
/**
 * Extracts the 3-bit compression algorithm field from the flags column.
 */
zlib::Algorithms SqlDirent::RetrieveCompressionAlgorithm(const unsigned flags)
  const
{
  // 3 bits, so use 7 (111) to only pull out the flags we want
  const unsigned algo_id = (flags >> kFlagPosCompression) & 7;
  return static_cast<zlib::Algorithms>(algo_id);
}
||
460 |
|||
461 |
|||
462 |
199 |
/**
 * The low 32 bits of the packed hardlinks column hold the link count.
 */
uint32_t SqlDirent::Hardlinks2Linkcount(const uint64_t hardlinks) const {
  return static_cast<uint32_t>(hardlinks & 0xFFFFFFFFu);
}
||
465 |
|||
466 |
|||
467 |
199 |
/**
 * The high 32 bits of the packed hardlinks column hold the hardlink group id.
 */
uint32_t SqlDirent::Hardlinks2HardlinkGroup(const uint64_t hardlinks) const {
  return static_cast<uint32_t>(hardlinks >> 32);
}
||
470 |
|||
471 |
|||
472 |
273 |
uint64_t SqlDirent::MakeHardlinks(const uint32_t hardlink_group, |
|
473 |
const uint32_t linkcount) const |
||
474 |
{ |
||
475 |
✗✓ | 273 |
assert(linkcount > 0); |
476 |
273 |
return (static_cast<uint64_t>(hardlink_group) << 32) | linkcount; |
|
477 |
} |
||
478 |
|||
479 |
|||
480 |
/** |
||
481 |
* Expands variant symlinks containing $(VARIABLE) string. Uses the environment |
||
482 |
* variables of the current process (cvmfs2) |
||
483 |
*/ |
||
484 |
176 |
/**
 * Expands variant symlinks containing $(VARIABLE) strings, using the
 * environment variables of the current process (cvmfs2).  A default value
 * can be given as $(VARIABLE:-default), used when VARIABLE is unset.
 * Symlinks without a '$' are left untouched (fast path).
 *
 * @param raw_symlink  [in,out] symlink target, rewritten in place
 */
void SqlDirent::ExpandSymlink(LinkString *raw_symlink) const {
  const char *c = raw_symlink->GetChars();
  const char *cEnd = c + raw_symlink->GetLength();
  // Fast path: no '$' anywhere, nothing to expand
  for (; c < cEnd; ++c) {
    if (*c == '$')
      goto expand_symlink;
  }
  return;

 expand_symlink:
  LinkString result;
  for (c = raw_symlink->GetChars(); c < cEnd; ++c) {
    if ((*c == '$') && (c < cEnd-2) && (*(c+1) == '(')) {
      c += 2;
      // Find the closing parenthesis
      const char *rpar = c;
      while (rpar < cEnd) {
        if (*rpar == ')')
          goto expand_symlink_getenv;
        rpar++;
      }
      // right parenthesis missing: emit "$(" plus current char verbatim
      result.Append("$(", 2);
      result.Append(c, 1);
      continue;

   expand_symlink_getenv:
      // Check for default value separator ":-" inside $(...)
      const char *default_separator = c;
      const char *default_value = rpar;
      while (default_separator != rpar) {
        if ((*default_separator == ':') && (*(default_separator + 1) == '-')) {
          default_value = default_separator + 2;
          break;
        }
        default_separator++;
      }

      // std::string instead of a variable-length array: VLAs are a
      // non-standard GCC extension in C++ and unbounded stack usage
      const unsigned environ_var_length = default_separator - c;
      const std::string environ_var(c, environ_var_length);
      const char *environ_value = getenv(environ_var.c_str());  // Don't free!
      if (environ_value) {
        result.Append(environ_value, strlen(environ_value));
      } else {
        // Variable unset: fall back to the default (empty if none given)
        const unsigned default_length = rpar - default_value;
        result.Append(default_value, default_length);
      }
      c = rpar;
      continue;
    }
    result.Append(c, 1);
  }
  raw_symlink->Assign(result);
  return;
}
||
540 |
|||
541 |
|||
542 |
//------------------------------------------------------------------------------ |
||
543 |
|||
544 |
|||
545 |
273 |
bool SqlDirentWrite::BindDirentFields(const int hash_idx, |
|
546 |
const int hardlinks_idx, |
||
547 |
const int size_idx, |
||
548 |
const int mode_idx, |
||
549 |
const int mtime_idx, |
||
550 |
const int flags_idx, |
||
551 |
const int name_idx, |
||
552 |
const int symlink_idx, |
||
553 |
const int uid_idx, |
||
554 |
const int gid_idx, |
||
555 |
const DirectoryEntry &entry) |
||
556 |
{ |
||
557 |
const uint64_t hardlinks = |
||
558 |
MakeHardlinks(entry.hardlink_group_, |
||
559 |
273 |
entry.linkcount_); |
|
560 |
|||
561 |
return ( |
||
562 |
BindHashBlob(hash_idx, entry.checksum_) && |
||
563 |
BindInt64(hardlinks_idx, hardlinks) && |
||
564 |
BindInt64(size_idx, entry.size_) && |
||
565 |
BindInt(mode_idx, entry.mode_) && |
||
566 |
BindInt64(uid_idx, entry.uid_) && |
||
567 |
BindInt64(gid_idx, entry.gid_) && |
||
568 |
BindInt64(mtime_idx, entry.mtime_) && |
||
569 |
BindInt(flags_idx, CreateDatabaseFlags(entry)) && |
||
570 |
BindText(name_idx, entry.name_.GetChars(), entry.name_.GetLength()) && |
||
571 |
BindText(symlink_idx, entry.symlink_.GetChars(), entry.symlink_.GetLength()) |
||
572 |
✓✗✓✗ ✓✗✓✗ ✗✗✓✓ ✗✓✗✓ ✗✓✓✗ |
273 |
); // NOLINT |
573 |
} |
||
574 |
|||
575 |
|||
576 |
//------------------------------------------------------------------------------ |
||
577 |
|||
578 |
|||
579 |
1 |
/**
 * Prepares the statement listing all content hashes of a catalog.  For
 * schema >= 2.4 it also lists chunk hashes and excludes external files
 * (flag bit 128, kFlagFileExternal); the third result column marks chunk
 * hashes (1) vs. whole-file hashes (0).
 */
SqlListContentHashes::SqlListContentHashes(const CatalogDatabase &database) {
  static const char *stmt_lt_2_4 =
      "SELECT hash, flags, 0 "
      "  FROM catalog "
      "  WHERE length(hash) > 0;";

  static const char *stmt_ge_2_4 =
      "SELECT hash, flags, 0 "
      "  FROM catalog "
      "  WHERE (length(catalog.hash) > 0) AND "
      "        ((flags & 128) = 0) "  // kFlagFileExternal
      "UNION "
      "SELECT chunks.hash, catalog.flags, 1 "
      "  FROM catalog "
      "  LEFT JOIN chunks "
      "  ON catalog.md5path_1 = chunks.md5path_1 AND "
      "     catalog.md5path_2 = chunks.md5path_2 "
      "  WHERE (catalog.flags & 128) = 0;";  // kFlagFileExternal

  const bool is_pre_2_4 =
    database.schema_version() < 2.4 - CatalogDatabase::kSchemaEpsilon;
  DeferredInit(database.sqlite_db(), is_pre_2_4 ? stmt_lt_2_4 : stmt_ge_2_4);
}
||
604 |
|||
605 |
|||
606 |
8 |
/**
 * Builds the hash of the current result row; rows flagged as chunk hashes
 * (column 2 == 1) get the partial-file suffix.
 */
shash::Any SqlListContentHashes::GetHash() const {
  const unsigned int db_flags = RetrieveInt(1);
  shash::Any content_hash =
    RetrieveHashBlob(0, RetrieveHashAlgorithm(db_flags));
  const bool is_chunk = (RetrieveInt(2) == 1);
  if (is_chunk) {
    content_hash.suffix = shash::kSuffixPartial;
  }

  return content_hash;
}
||
616 |
|||
617 |
|||
618 |
//------------------------------------------------------------------------------ |
||
619 |
|||
620 |
#define DB_FIELDS_LT_V2_1 \ |
||
621 |
"catalog.hash, catalog.inode, catalog.size, " \ |
||
622 |
"catalog.mode, catalog.mtime, catalog.flags, " \ |
||
623 |
"catalog.name, catalog.symlink, catalog.md5path_1, " \ |
||
624 |
"catalog.md5path_2, catalog.parent_1, catalog.parent_2, " \ |
||
625 |
"catalog.rowid" |
||
626 |
#define DB_FIELDS_GE_V2_1_LT_R2 \ |
||
627 |
"catalog.hash, catalog.hardlinks, catalog.size, " \ |
||
628 |
"catalog.mode, catalog.mtime, catalog.flags, " \ |
||
629 |
"catalog.name, catalog.symlink, catalog.md5path_1, " \ |
||
630 |
"catalog.md5path_2, catalog.parent_1, catalog.parent_2, " \ |
||
631 |
"catalog.rowid, catalog.uid, catalog.gid, " \ |
||
632 |
"0" |
||
633 |
#define DB_FIELDS_GE_V2_1_GE_R2 \ |
||
634 |
"catalog.hash, catalog.hardlinks, catalog.size, " \ |
||
635 |
"catalog.mode, catalog.mtime, catalog.flags, " \ |
||
636 |
"catalog.name, catalog.symlink, catalog.md5path_1, " \ |
||
637 |
"catalog.md5path_2, catalog.parent_1, catalog.parent_2, " \ |
||
638 |
"catalog.rowid, catalog.uid, catalog.gid, " \ |
||
639 |
"catalog.xattr IS NOT NULL" |
||
640 |
|||
641 |
#define MAKE_STATEMENT(STMT_TMPL, REV) \ |
||
642 |
static const std::string REV = \ |
||
643 |
ReplaceAll(STMT_TMPL, "@DB_FIELDS@", DB_FIELDS_ ## REV) |
||
644 |
|||
645 |
#define MAKE_STATEMENTS(STMT_TMPL) \ |
||
646 |
MAKE_STATEMENT(STMT_TMPL, LT_V2_1); \ |
||
647 |
MAKE_STATEMENT(STMT_TMPL, GE_V2_1_LT_R2); \ |
||
648 |
MAKE_STATEMENT(STMT_TMPL, GE_V2_1_GE_R2) |
||
649 |
|||
650 |
#define DEFERRED_INIT(DB, REV) \ |
||
651 |
DeferredInit((DB).sqlite_db(), (REV).c_str()) |
||
652 |
|||
653 |
#define DEFERRED_INITS(DB) \ |
||
654 |
if ((DB).schema_version() < 2.1 - CatalogDatabase::kSchemaEpsilon) { \ |
||
655 |
DEFERRED_INIT((DB), LT_V2_1); \ |
||
656 |
} else if ((DB).schema_revision() < 2) { \ |
||
657 |
DEFERRED_INIT((DB), GE_V2_1_LT_R2); \ |
||
658 |
} else { \ |
||
659 |
DEFERRED_INIT((DB), GE_V2_1_GE_R2); \ |
||
660 |
} |
||
661 |
|||
662 |
|||
663 |
/**
 * MD5 path hash of the current result row (columns 8/9 of @DB_FIELDS@).
 */
shash::Md5 SqlLookup::GetPathHash() const {
  return RetrieveMd5(8, 9);
}
||
666 |
|||
667 |
|||
668 |
/**
 * MD5 parent path hash of the current result row (columns 10/11 of
 * @DB_FIELDS@).
 */
shash::Md5 SqlLookup::GetParentPathHash() const {
  return RetrieveMd5(10, 11);
}
||
671 |
|||
672 |
|||
673 |
/** |
||
674 |
* This method is a friend of DirectoryEntry. |
||
675 |
*/ |
||
676 |
199 |
DirectoryEntry SqlLookup::GetDirent(const Catalog *catalog,
                                    const bool expand_symlink) const
{
  DirectoryEntry result;

  // Column indices follow the @DB_FIELDS@ macros above
  const unsigned database_flags = RetrieveInt(5);
  result.is_nested_catalog_root_ = (database_flags & kFlagDirNestedRoot);
  result.is_nested_catalog_mountpoint_ =
    (database_flags & kFlagDirNestedMountpoint);
  const char *name = reinterpret_cast<const char *>(RetrieveText(6));
  const char *symlink = reinterpret_cast<const char *>(RetrieveText(7));

  // Retrieve the hardlink information from the hardlinks database field
  if (catalog->schema() < 2.1 - CatalogDatabase::kSchemaEpsilon) {
    // Legacy (pre-2.1) schema: no hardlink groups, xattrs, or chunks;
    // ownership is taken from the client's globals
    result.linkcount_ = 1;
    result.hardlink_group_ = 0;
    result.inode_ = catalog->GetMangledInode(RetrieveInt64(12), 0);
    result.is_chunked_file_ = false;
    result.has_xattrs_ = false;
    result.checksum_ = RetrieveHashBlob(0, shash::kSha1);
    result.uid_ = g_uid;
    result.gid_ = g_gid;
  } else {
    // Modern schema: hardlink group id and linkcount packed in one column
    const uint64_t hardlinks = RetrieveInt64(1);
    result.linkcount_ = Hardlinks2Linkcount(hardlinks);
    result.hardlink_group_ = Hardlinks2HardlinkGroup(hardlinks);
    result.inode_ =
      catalog->GetMangledInode(RetrieveInt64(12), result.hardlink_group_);
    result.is_bind_mountpoint_ = (database_flags & kFlagDirBindMountpoint);
    result.is_chunked_file_ = (database_flags & kFlagFileChunk);
    result.is_hidden_ = (database_flags & kFlagHidden);
    result.is_external_file_ = (database_flags & kFlagFileExternal);
    // Column 15 is "xattr IS NOT NULL" (or constant 0 for revision < 2)
    result.has_xattrs_ = RetrieveInt(15) != 0;
    result.checksum_ =
      RetrieveHashBlob(0, RetrieveHashAlgorithm(database_flags));
    result.compression_algorithm_ =
      RetrieveCompressionAlgorithm(database_flags);

    if (g_claim_ownership) {
      // Present all entries as owned by the mounting user
      result.uid_ = g_uid;
      result.gid_ = g_gid;
    } else {
      result.uid_ = catalog->MapUid(RetrieveInt64(13));
      result.gid_ = catalog->MapGid(RetrieveInt64(14));
    }
  }

  result.mode_ = RetrieveInt(3);
  result.size_ = RetrieveInt64(2);
  result.mtime_ = RetrieveInt64(4);
  result.name_.Assign(name, strlen(name));
  result.symlink_.Assign(symlink, strlen(symlink));
  // Variant symlinks ($(VAR)) are expanded unless raw symlinks are requested
  if (expand_symlink && !g_raw_symlinks)
    ExpandSymlink(&result.symlink_);

  return result;
}
||
733 |
|||
734 |
|||
735 |
//------------------------------------------------------------------------------ |
||
736 |
|||
737 |
|||
738 |
98 |
/**
 * Directory listing: selects all entries whose parent path hash matches
 * the bound (:p_1, :p_2) pair.  The MAKE_STATEMENTS/DEFERRED_INITS macros
 * pick the field list matching the catalog's schema version/revision.
 */
SqlListing::SqlListing(const CatalogDatabase &database) {
  MAKE_STATEMENTS("SELECT @DB_FIELDS@ FROM catalog "
                  "WHERE (parent_1 = :p_1) AND (parent_2 = :p_2);");
  DEFERRED_INITS(database);
}
||
743 |
|||
744 |
|||
745 |
34 |
/**
 * Binds the parent path hash (parameters :p_1/:p_2) for a listing query.
 */
bool SqlListing::BindPathHash(const struct shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}
||
748 |
|||
749 |
|||
750 |
//------------------------------------------------------------------------------ |
||
751 |
|||
752 |
|||
753 |
98 |
/**
 * Looks up a single catalog entry by the MD5 hash of its full path.
 */
SqlLookupPathHash::SqlLookupPathHash(const CatalogDatabase &database) {
  // @DB_FIELDS@ is expanded by MAKE_STATEMENTS into the schema-dependent
  // catalog field list.
  MAKE_STATEMENTS("SELECT @DB_FIELDS@ FROM catalog "
                  "WHERE (md5path_1 = :md5_1) AND (md5path_2 = :md5_2);");
  DEFERRED_INITS(database);
}

/**
 * Binds the entry's MD5 path hash, split into two 64 bit halves
 * (parameters :md5_1 and :md5_2).
 */
bool SqlLookupPathHash::BindPathHash(const struct shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}
||
762 |
|||
763 |
|||
764 |
//------------------------------------------------------------------------------ |
||
765 |
|||
766 |
|||
767 |
/**
 * Looks up a catalog entry by its SQLite row id.  Row ids serve as the basis
 * for inode construction, hence the parameter name in BindRowId().
 */
SqlLookupInode::SqlLookupInode(const CatalogDatabase &database) {
  MAKE_STATEMENTS("SELECT @DB_FIELDS@ FROM catalog WHERE rowid = :rowid;");
  DEFERRED_INITS(database);
}


/**
 * Binds the row id (derived from an inode) to look up.
 */
bool SqlLookupInode::BindRowId(const uint64_t inode) {
  return BindInt64(1, inode);
}
||
776 |
|||
777 |
|||
778 |
//------------------------------------------------------------------------------ |
||
779 |
|||
780 |
|||
781 |
/**
 * Finds transition points (entries flagged as nested catalog mountpoints)
 * that still have child entries in this catalog, i.e. "dangling" mountpoints.
 * The self-join matches mountpoint entries (catalog) against entries that
 * reference them as parent (c2).
 */
SqlLookupDanglingMountpoints::SqlLookupDanglingMountpoints(
  const catalog::CatalogDatabase &database)
{
  MAKE_STATEMENTS("SELECT DISTINCT @DB_FIELDS@ FROM catalog "
                  "JOIN catalog AS c2 "
                  "ON catalog.md5path_1 = c2.parent_1 AND "
                  "   catalog.md5path_2 = c2.parent_2 "
                  "WHERE catalog.flags & :nested_mountpoint_flag");
  DEFERRED_INITS(database);

  // this pretty much removes the advantage of a deferred init but the
  // statement is anyway only used directly.  The flag value is constant, so
  // it is bound once here instead of by the caller.
  const bool success = BindInt64(1, SqlDirent::kFlagDirNestedMountpoint);
  assert(success);
}
||
795 |
|||
796 |
|||
797 |
//------------------------------------------------------------------------------ |
||
798 |
|||
799 |
|||
800 |
39 |
/**
 * Updates the base (non-catalog-structural) metadata of an existing entry,
 * selected by its MD5 path hash.  Flags and hardlink information are left
 * untouched ("touch" semantics).
 */
SqlDirentTouch::SqlDirentTouch(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "UPDATE catalog "
    "SET hash = :hash, size = :size, mode = :mode, mtime = :mtime, "
//            1             2             3             4
    "name = :name, symlink = :symlink, uid = :uid, gid = :gid "
//            5                 6               7           8
    "WHERE (md5path_1 = :md5_1) AND (md5path_2 = :md5_2);");
//                       9                          10
}


/**
 * Binds the metadata fields of @p entry to parameters 1-8; the indices must
 * match the numbering comments in the UPDATE statement above.
 */
bool SqlDirentTouch::BindDirentBase(const DirectoryEntryBase &entry) {
  return (
    BindHashBlob(1, entry.checksum_) &&
    BindInt64(2, entry.size_) &&
    BindInt(3, entry.mode_) &&
    BindInt64(4, entry.mtime_) &&
    BindText(5, entry.name_.GetChars(), entry.name_.GetLength()) &&
    BindText(6, entry.symlink_.GetChars(), entry.symlink_.GetLength()) &&
    BindInt64(7, entry.uid_) &&
    BindInt64(8, entry.gid_));
}


/**
 * Binds the MD5 path hash of the entry to update (parameters 9 and 10).
 */
bool SqlDirentTouch::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(9, 10, hash);
}
||
828 |
|||
829 |
|||
830 |
//------------------------------------------------------------------------------ |
||
831 |
|||
832 |
|||
833 |
98 |
/**
 * Resolves content hash and size of the nested catalog registered at a given
 * transition path.  The statement is picked by schema revision:
 *  - revision >= 4: bind mountpoints live in their own table and are searched
 *    together with regular nested catalogs (UNION ALL),
 *  - revision 1-3: only nested_catalogs, which already has a size column,
 *  - older: no size column; a literal 0 is selected instead.
 * Note: the named parameter :path shares one index across both branches of
 * the UNION ALL, so a single BindSearchPath() covers both.
 */
SqlNestedCatalogLookup::SqlNestedCatalogLookup(const CatalogDatabase &database)
{
  static const char *stmt_2_5_ge_4 =
    "SELECT sha1, size FROM nested_catalogs WHERE path=:path "
    "UNION ALL SELECT sha1, size FROM bind_mountpoints WHERE path=:path;";
  static const char *stmt_2_5_ge_1_lt_4 =
    "SELECT sha1, size FROM nested_catalogs WHERE path=:path;";
  // Internally converts NULL to 0 for size
  static const char *stmt_2_5_lt_1 =
    "SELECT sha1, 0 FROM nested_catalogs WHERE path=:path;";

  if (database.IsEqualSchema(database.schema_version(), 2.5) &&
      (database.schema_revision() >= 4))
  {
    DeferredInit(database.sqlite_db(), stmt_2_5_ge_4);
  } else if (database.IsEqualSchema(database.schema_version(), 2.5) &&
             (database.schema_revision() >= 1))
  {
    DeferredInit(database.sqlite_db(), stmt_2_5_ge_1_lt_4);
  } else {
    DeferredInit(database.sqlite_db(), stmt_2_5_lt_1);
  }
}
||
856 |
|||
857 |
|||
858 |
12 |
bool SqlNestedCatalogLookup::BindSearchPath(const PathString &path) { |
|
859 |
12 |
return BindText(1, path.GetChars(), path.GetLength()); |
|
860 |
} |
||
861 |
|||
862 |
|||
863 |
12 |
/**
 * Returns the content hash of the found nested catalog, or an empty "any"
 * hash if no hash is stored.
 *
 * Fix: sqlite3_column_text() returns NULL for an SQL NULL value; constructing
 * a std::string from a NULL pointer is undefined behavior.  A NULL column is
 * now treated the same as an empty hash string.
 */
shash::Any SqlNestedCatalogLookup::GetContentHash() const {
  const char *raw = reinterpret_cast<const char *>(RetrieveText(0));
  const string hash = (raw == NULL) ? string() : string(raw);
  return (hash.empty()) ? shash::Any(shash::kAny) :
                          shash::MkFromHexPtr(shash::HexPtr(hash),
                                              shash::kSuffixCatalog);
}
||
869 |
|||
870 |
|||
871 |
12 |
/**
 * Returns the size of the compressed nested catalog in bytes.  For catalogs
 * predating the size column the statement selects a literal 0 instead.
 */
uint64_t SqlNestedCatalogLookup::GetSize() const {
  const int64_t raw_size = RetrieveInt64(1);
  return static_cast<uint64_t>(raw_size);
}
||
874 |
|||
875 |
|||
876 |
//------------------------------------------------------------------------------ |
||
877 |
|||
878 |
|||
879 |
98 |
/**
 * Lists all nested catalogs referenced by this catalog.  Like
 * SqlNestedCatalogLookup, the statement depends on the schema revision:
 * revision >= 4 additionally includes bind mountpoints, revisions 1-3 have a
 * size column, and older catalogs select a literal 0 for size.
 */
SqlNestedCatalogListing::SqlNestedCatalogListing(
  const CatalogDatabase &database)
{
  static const char *stmt_2_5_ge_4 =
    "SELECT path, sha1, size FROM nested_catalogs "
    "UNION ALL SELECT path, sha1, size FROM bind_mountpoints;";
  static const char *stmt_2_5_ge_1_lt_4 =
    "SELECT path, sha1, size FROM nested_catalogs;";
  // Internally converts NULL to 0 for size
  static const char *stmt_2_5_lt_1 =
    "SELECT path, sha1, 0 FROM nested_catalogs;";

  if (database.IsEqualSchema(database.schema_version(), 2.5) &&
      (database.schema_revision() >= 4))
  {
    DeferredInit(database.sqlite_db(), stmt_2_5_ge_4);
  } else if (database.IsEqualSchema(database.schema_version(), 2.5) &&
             (database.schema_revision() >= 1))
  {
    DeferredInit(database.sqlite_db(), stmt_2_5_ge_1_lt_4);
  } else {
    DeferredInit(database.sqlite_db(), stmt_2_5_lt_1);
  }
}
||
903 |
|||
904 |
|||
905 |
13 |
PathString SqlNestedCatalogListing::GetPath() const { |
|
906 |
13 |
const char *path = reinterpret_cast<const char *>(RetrieveText(0)); |
|
907 |
13 |
return PathString(path, strlen(path)); |
|
908 |
} |
||
909 |
|||
910 |
|||
911 |
13 |
shash::Any SqlNestedCatalogListing::GetContentHash() const { |
|
912 |
13 |
const string hash = string(reinterpret_cast<const char *>(RetrieveText(1))); |
|
913 |
return (hash.empty()) ? shash::Any(shash::kAny) : |
||
914 |
shash::MkFromHexPtr(shash::HexPtr(hash), |
||
915 |
✓✓ | 13 |
shash::kSuffixCatalog); |
916 |
} |
||
917 |
|||
918 |
|||
919 |
13 |
uint64_t SqlNestedCatalogListing::GetSize() const { |
|
920 |
13 |
return RetrieveInt64(2); |
|
921 |
} |
||
922 |
|||
923 |
|||
924 |
//------------------------------------------------------------------------------ |
||
925 |
|||
926 |
|||
927 |
98 |
/**
 * Lists only the nested catalogs owned by this catalog, i.e. without bind
 * mountpoints (in contrast to SqlNestedCatalogListing).  Catalogs older than
 * schema revision 1 have no size column, so a literal 0 is selected.
 */
SqlOwnNestedCatalogListing::SqlOwnNestedCatalogListing(
  const CatalogDatabase &database)
{
  static const char *stmt_2_5_ge_1 =
    "SELECT path, sha1, size FROM nested_catalogs;";
  // Internally converts NULL to 0 for size
  static const char *stmt_2_5_lt_1 =
    "SELECT path, sha1, 0 FROM nested_catalogs;";

  if (database.IsEqualSchema(database.schema_version(), 2.5) &&
      (database.schema_revision() >= 1))
  {
    DeferredInit(database.sqlite_db(), stmt_2_5_ge_1);
  } else {
    DeferredInit(database.sqlite_db(), stmt_2_5_lt_1);
  }
}
||
944 |
|||
945 |
|||
946 |
4 |
PathString SqlOwnNestedCatalogListing::GetPath() const { |
|
947 |
4 |
const char *path = reinterpret_cast<const char *>(RetrieveText(0)); |
|
948 |
4 |
return PathString(path, strlen(path)); |
|
949 |
} |
||
950 |
|||
951 |
|||
952 |
4 |
shash::Any SqlOwnNestedCatalogListing::GetContentHash() const { |
|
953 |
4 |
const string hash = string(reinterpret_cast<const char *>(RetrieveText(1))); |
|
954 |
return (hash.empty()) ? shash::Any(shash::kAny) : |
||
955 |
shash::MkFromHexPtr(shash::HexPtr(hash), |
||
956 |
✗✓ | 4 |
shash::kSuffixCatalog); |
957 |
} |
||
958 |
|||
959 |
|||
960 |
4 |
uint64_t SqlOwnNestedCatalogListing::GetSize() const { |
|
961 |
4 |
return RetrieveInt64(2); |
|
962 |
} |
||
963 |
|||
964 |
|||
965 |
//------------------------------------------------------------------------------ |
||
966 |
|||
967 |
|||
968 |
86 |
/**
 * Inserts a complete catalog entry.  The numbering comments below give the
 * one-based bind-parameter indices used by the Bind* methods.
 */
SqlDirentInsert::SqlDirentInsert(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "INSERT INTO catalog "
    "(md5path_1, md5path_2, parent_1, parent_2, hash, hardlinks, size, mode,"
    //    1          2          3         4      5       6        7     8
    "mtime, flags, name, symlink, uid, gid, xattr) "
    //  9,    10    11      12     13   14    15
    "VALUES (:md5_1, :md5_2, :p_1, :p_2, :hash, :links, :size, :mode, :mtime,"
    " :flags, :name, :symlink, :uid, :gid, :xattr);");
}


/**
 * Binds the MD5 hash of the entry's own path (parameters 1 and 2).
 */
bool SqlDirentInsert::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}


/**
 * Binds the MD5 hash of the parent directory's path (parameters 3 and 4).
 */
bool SqlDirentInsert::BindParentPathHash(const shash::Md5 &hash) {
  return BindMd5(3, 4, hash);
}


/**
 * Binds all remaining entry fields (hash, hardlinks, size, mode, mtime,
 * flags, name, symlink, uid, gid) at parameters 5-14.
 */
bool SqlDirentInsert::BindDirent(const DirectoryEntry &entry) {
  return BindDirentFields(5, 6, 7, 8, 9, 10, 11, 12, 13, 14, entry);
}
||
993 |
|||
994 |
|||
995 |
/**
 * Serializes the given extended attribute list and binds it to the xattr
 * column (parameter 15).  An empty list serializes to NULL.
 *
 * NOTE(review): Serialize() appears to hand out a newly allocated buffer and
 * BindBlobTransient's name suggests SQLite copies it (SQLITE_TRANSIENT);
 * confirm who releases packed_xattrs afterwards — from this file alone it
 * looks like it may leak.
 */
bool SqlDirentInsert::BindXattr(const XattrList &xattrs) {
  unsigned char *packed_xattrs;
  unsigned size;
  xattrs.Serialize(&packed_xattrs, &size);
  if (packed_xattrs == NULL)
    return BindNull(15);
  return BindBlobTransient(15, packed_xattrs, size);
}


/**
 * Binds NULL to the xattr column (parameter 15) for entries without extended
 * attributes.
 */
bool SqlDirentInsert::BindXattrEmpty() {
  return BindNull(15);
}
||
1008 |
|||
1009 |
|||
1010 |
//------------------------------------------------------------------------------ |
||
1011 |
|||
1012 |
|||
1013 |
39 |
/**
 * Replaces all mutable fields of an existing catalog entry, selected by its
 * MD5 path hash.  Unlike SqlDirentTouch, this also rewrites flags and the
 * hardlinks field.  The numbering comments give the bind-parameter indices.
 */
SqlDirentUpdate::SqlDirentUpdate(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "UPDATE catalog "
    "SET hash = :hash, size = :size, mode = :mode, mtime = :mtime, "
//            1             2             3             4
    "flags = :flags, name = :name, symlink = :symlink, "
    "hardlinks = :hardlinks, "
//            5             6                 7                8
    "uid = :uid, gid = :gid "
//            9           10
    "WHERE (md5path_1 = :md5_1) AND (md5path_2 = :md5_2);");
//                       11                         12
}


/**
 * Binds the MD5 path hash of the entry to update (parameters 11 and 12).
 */
bool SqlDirentUpdate::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(11, 12, hash);
}


/**
 * Binds the entry's fields; note the non-sequential index order because
 * hardlinks (index 8) appears after symlink in the SET clause.
 */
bool SqlDirentUpdate::BindDirent(const DirectoryEntry &entry) {
  return BindDirentFields(1, 8, 2, 3, 4, 5, 6, 7, 9, 10, entry);
}
||
1035 |
|||
1036 |
|||
1037 |
//------------------------------------------------------------------------------ |
||
1038 |
|||
1039 |
|||
1040 |
39 |
/**
 * Removes a single catalog entry, selected by its MD5 path hash.
 */
SqlDirentUnlink::SqlDirentUnlink(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "DELETE FROM catalog "
    "WHERE (md5path_1 = :md5_1) AND (md5path_2 = :md5_2);");
}

/**
 * Binds the MD5 path hash of the entry to delete (parameters 1 and 2).
 */
bool SqlDirentUnlink::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}
||
1049 |
|||
1050 |
|||
1051 |
//------------------------------------------------------------------------------ |
||
1052 |
|||
1053 |
|||
1054 |
39 |
/**
 * Adjusts the link count of an entire hardlink group in one statement.
 */
SqlIncLinkcount::SqlIncLinkcount(const CatalogDatabase &database) {
  // This command changes the linkcount of a whole hardlink group at once!
  // We can do this, since the 'hardlinks'-field contains the hardlink group
  // ID in the higher 32bit as well as the 'linkcount' in the lower 32bit.
  // This field will be equal for all entries belonging to the same hardlink
  // group while adding/subtracting small values from it will only effect the
  // linkcount in the lower 32bit.
  // Take a deep breath!
  DeferredInit(database.sqlite_db(),
    "UPDATE catalog SET hardlinks = hardlinks + :delta "
    "WHERE hardlinks = (SELECT hardlinks from catalog "
    "WHERE md5path_1 = :md5_1 AND md5path_2 = :md5_2);");
}


/**
 * Binds the MD5 path hash of any member of the hardlink group
 * (parameters 2 and 3; :delta occupies index 1).
 */
bool SqlIncLinkcount::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(2, 3, hash);
}


/**
 * Binds the (possibly negative) linkcount delta; it must stay small enough
 * not to overflow into the group-ID bits in the upper 32 bit.
 */
bool SqlIncLinkcount::BindDelta(const int delta) {
  return BindInt(1, delta);
}
||
1077 |
|||
1078 |
|||
1079 |
//------------------------------------------------------------------------------ |
||
1080 |
|||
1081 |
|||
1082 |
39 |
/**
 * Inserts one file chunk, keyed by the owning file's MD5 path hash plus the
 * chunk's offset.
 */
SqlChunkInsert::SqlChunkInsert(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "INSERT INTO chunks (md5path_1, md5path_2, offset, size, hash) "
    //                       1          2         3      4     5
    "VALUES (:md5_1, :md5_2, :offset, :size, :hash);");
}


/**
 * Binds the MD5 path hash of the file this chunk belongs to
 * (parameters 1 and 2).
 */
bool SqlChunkInsert::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}


/**
 * Binds offset, size and content hash of the chunk (parameters 3-5).
 */
bool SqlChunkInsert::BindFileChunk(const FileChunk &chunk) {
  return
    BindInt64(3, chunk.offset()) &&
    BindInt64(4, chunk.size()) &&
    BindHashBlob(5, chunk.content_hash());
}
||
1101 |
|||
1102 |
|||
1103 |
//------------------------------------------------------------------------------ |
||
1104 |
|||
1105 |
|||
1106 |
39 |
/**
 * Removes all chunks belonging to a single file, selected by the file's MD5
 * path hash.
 */
SqlChunksRemove::SqlChunksRemove(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "DELETE FROM chunks "
    "WHERE (md5path_1 = :md5_1) AND (md5path_2 = :md5_2);");
}


/**
 * Binds the MD5 path hash of the chunked file (parameters 1 and 2).
 */
bool SqlChunksRemove::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}
||
1116 |
|||
1117 |
|||
1118 |
//------------------------------------------------------------------------------ |
||
1119 |
|||
1120 |
|||
1121 |
98 |
/**
 * Lists all chunks of a single file in ascending offset order.
 */
SqlChunksListing::SqlChunksListing(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "SELECT offset, size, hash FROM chunks "
    //         0      1     2
    "WHERE (md5path_1 = :md5_1) AND (md5path_2 = :md5_2) "
    //                    1                        2
    "ORDER BY offset ASC;");
}


/**
 * Binds the MD5 path hash of the chunked file (parameters 1 and 2).
 */
bool SqlChunksListing::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}


/**
 * Builds a FileChunk from the current result row.  The hash blob carries no
 * algorithm information, so the caller supplies it via interpret_hash_as.
 */
FileChunk SqlChunksListing::GetFileChunk(
  const shash::Algorithms interpret_hash_as) const
{
  return FileChunk(
    RetrieveHashBlob(2, interpret_hash_as, shash::kSuffixPartial),
    RetrieveInt64(0),
    RetrieveInt64(1));
}
||
1144 |
|||
1145 |
|||
1146 |
//------------------------------------------------------------------------------ |
||
1147 |
|||
1148 |
|||
1149 |
39 |
/**
 * Counts the chunks registered for a single file, selected by the file's MD5
 * path hash.
 */
SqlChunksCount::SqlChunksCount(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "SELECT count(*) FROM chunks "
    //         0
    "WHERE (md5path_1 = :md5_1) AND (md5path_2 = :md5_2)");
    //                    1                        2
}


/**
 * Binds the MD5 path hash of the chunked file (parameters 1 and 2).
 */
bool SqlChunksCount::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}


/**
 * Returns the number of chunks in the current result row.
 * NOTE(review): the int64 count is narrowed to int; fine for realistic chunk
 * counts but worth confirming against the maximum chunks per file.
 */
int SqlChunksCount::GetChunkCount() const {
  return RetrieveInt64(0);
}
||
1166 |
|||
1167 |
|||
1168 |
//------------------------------------------------------------------------------ |
||
1169 |
|||
1170 |
|||
1171 |
39 |
/**
 * Determines the highest hardlink group ID in use, needed to assign fresh
 * group IDs when new hardlink groups are created.
 */
SqlMaxHardlinkGroup::SqlMaxHardlinkGroup(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(), "SELECT max(hardlinks) FROM catalog;");
}


/**
 * Extracts the group ID from the maximum hardlinks value: the group ID lives
 * in the upper 32 bit of the field (linkcount occupies the lower 32 bit).
 */
uint32_t SqlMaxHardlinkGroup::GetMaxGroupId() const {
  return RetrieveInt64(0) >> 32;
}
||
1179 |
|||
1180 |
|||
1181 |
//------------------------------------------------------------------------------ |
||
1182 |
|||
1183 |
|||
1184 |
120 |
/**
 * Reads a single value from the statistics counter table.  Catalogs older
 * than schema 2.4 have no statistics table; for those, a compatibility
 * statement ("SELECT 0;") is prepared and compat_ makes the accessors return
 * neutral values without touching the database.
 */
SqlGetCounter::SqlGetCounter(const CatalogDatabase &database) {
  static const char *stmt_ge_2_4 =
    "SELECT value from statistics WHERE counter = :counter;";
  static const char *stmt_lt_2_4 =
    "SELECT 0;";

  // kSchemaEpsilon guards the floating point schema version comparison
  if (database.schema_version() >= 2.4 - CatalogDatabase::kSchemaEpsilon) {
    compat_ = false;
    DeferredInit(database.sqlite_db(), stmt_ge_2_4);
  } else {
    compat_ = true;
    DeferredInit(database.sqlite_db(), stmt_lt_2_4);
  }
}


/**
 * Binds the counter name; a no-op success in compatibility mode, where the
 * statement has no parameters.
 */
bool SqlGetCounter::BindCounter(const std::string &counter) {
  if (compat_) return true;
  return BindText(1, counter);
}


/**
 * Returns the counter value of the current row, or 0 in compatibility mode.
 */
uint64_t SqlGetCounter::GetCounter() const {
  if (compat_) return 0;
  return RetrieveInt64(0);
}
||
1210 |
|||
1211 |
|||
1212 |
//------------------------------------------------------------------------------ |
||
1213 |
|||
1214 |
|||
1215 |
22 |
/**
 * Adds a (possibly negative) delta to an existing statistics counter.
 * Note the parameter order: :val is index 1, :counter is index 2.
 */
SqlUpdateCounter::SqlUpdateCounter(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "UPDATE statistics SET value=value+:val WHERE counter=:counter;");
}


/**
 * Binds the name of the counter to update (parameter 2).
 */
bool SqlUpdateCounter::BindCounter(const std::string &counter) {
  return BindText(2, counter);
}


/**
 * Binds the signed delta to add to the counter value (parameter 1).
 */
bool SqlUpdateCounter::BindDelta(const int64_t delta) {
  return BindInt64(1, delta);
}
||
1229 |
|||
1230 |
|||
1231 |
//------------------------------------------------------------------------------ |
||
1232 |
|||
1233 |
|||
1234 |
64 |
/**
 * Creates a statistics counter with an initial value, replacing any existing
 * counter of the same name (INSERT OR REPLACE).
 */
SqlCreateCounter::SqlCreateCounter(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "INSERT OR REPLACE INTO statistics (counter, value) "
    "VALUES (:counter, :value);");
}


/**
 * Binds the counter name (parameter 1).
 */
bool SqlCreateCounter::BindCounter(const std::string &counter) {
  return BindText(1, counter);
}


/**
 * Binds the counter's initial value (parameter 2).
 */
bool SqlCreateCounter::BindInitialValue(const int64_t value) {
  return BindInt64(2, value);
}
||
1249 |
|||
1250 |
|||
1251 |
//------------------------------------------------------------------------------ |
||
1252 |
|||
1253 |
|||
1254 |
98 |
/**
 * Enumerates every distinct content hash stored in the catalog: regular file
 * and directory (micro catalog) hashes from the catalog table and, for
 * schema >= 2.4, partial file chunk hashes from the chunks table.  External
 * files are excluded since their data is not in the CAS.
 *
 * Result columns: 0 = hash blob, 1 = hash suffix (chunk type),
 * 2 = hash algorithm, 3 = compression algorithm (see SqlAllChunks::Next).
 *
 * The leading/trailing spaces inside the SQL fragments are significant for
 * correct concatenation — keep them when editing.
 */
SqlAllChunks::SqlAllChunks(const CatalogDatabase &database) {
  // Decode the 3-bit hash algorithm field from the flags; +1 maps the stored
  // value onto the shash::Algorithms enum.
  int hash_mask = 7 << SqlDirent::kFlagPosHash;
  string flags2hash =
    " ((flags&" + StringifyInt(hash_mask) + ") >> " +
    StringifyInt(SqlDirent::kFlagPosHash) + ")+1 AS hash_algorithm ";

  // Decode the 3-bit compression algorithm field from the flags.
  int compression_mask = 7 << SqlDirent::kFlagPosCompression;
  string flags2compression =
    " ((flags&" + StringifyInt(compression_mask) + ") >> " +
    StringifyInt(SqlDirent::kFlagPosCompression) + ") " +
    "AS compression_algorithm ";

  // TODO(reneme): this depends on shash::kSuffix* being a char!
  //               it should be more generic or replaced entirely
  // TODO(reneme): this is practically the same as SqlListContentHashes and
  //               should be consolidated
  string sql = "SELECT DISTINCT hash, "
    "CASE WHEN flags & " + StringifyInt(SqlDirent::kFlagFile) + " THEN " +
    StringifyInt(shash::kSuffixNone) + " " +
    "WHEN flags & " + StringifyInt(SqlDirent::kFlagDir) + " THEN " +
    StringifyInt(shash::kSuffixMicroCatalog) + " END " +
    "AS chunk_type, " + flags2hash + "," + flags2compression +
    "FROM catalog WHERE (hash IS NOT NULL) AND "
    "(flags & " + StringifyInt(SqlDirent::kFlagFileExternal) + " = 0)";
  // Schema >= 2.4 additionally stores per-file chunks in their own table
  if (database.schema_version() >= 2.4 - CatalogDatabase::kSchemaEpsilon) {
    sql +=
      " UNION "
      "SELECT DISTINCT chunks.hash, " + StringifyInt(shash::kSuffixPartial) +
      ", " + flags2hash + "," + flags2compression +
      "FROM chunks, catalog WHERE "
      "chunks.md5path_1=catalog.md5path_1 AND "
      "chunks.md5path_2=catalog.md5path_2 AND "
      "(catalog.flags & " + StringifyInt(SqlDirent::kFlagFileExternal) +
      " = 0)";
  }
  sql += ";";
  Init(database.sqlite_db(), sql);
}
||
1292 |
|||
1293 |
|||
1294 |
1 |
/**
 * Starts the enumeration.  Nothing to do: the statement is prepared in the
 * constructor and fetched lazily by Next().
 */
bool SqlAllChunks::Open() {
  return true;
}


/**
 * Fetches the next distinct content hash.
 *
 * @param hash             filled with the hash blob (column 0), typed with
 *                         the algorithm from column 2 and the suffix (chunk
 *                         type) from column 1
 * @param compression_alg  filled with the compression algorithm (column 3)
 * @return false when the result set is exhausted
 */
bool SqlAllChunks::Next(shash::Any *hash, zlib::Algorithms *compression_alg) {
  if (!FetchRow()) {
    return false;
  }

  *hash = RetrieveHashBlob(0, static_cast<shash::Algorithms>(RetrieveInt(2)),
                              static_cast<shash::Suffix>(RetrieveInt(1)));
  *compression_alg = static_cast<zlib::Algorithms>(RetrieveInt(3));
  return true;
}


/**
 * Ends the enumeration by resetting the statement so it can be re-run.
 */
bool SqlAllChunks::Close() {
  return Reset();
}
||
1314 |
|||
1315 |
|||
1316 |
//------------------------------------------------------------------------------ |
||
1317 |
|||
1318 |
|||
1319 |
98 |
/**
 * Fetches the serialized extended attribute blob of a single entry, selected
 * by its MD5 path hash.
 */
SqlLookupXattrs::SqlLookupXattrs(const CatalogDatabase &database) {
  DeferredInit(database.sqlite_db(),
    "SELECT xattr FROM catalog "
    "WHERE (md5path_1 = :md5_1) AND (md5path_2 = :md5_2);");
}


/**
 * Binds the MD5 path hash of the entry (parameters 1 and 2).
 */
bool SqlLookupXattrs::BindPathHash(const shash::Md5 &hash) {
  return BindMd5(1, 2, hash);
}


/**
 * Deserializes the xattr blob of the current row.  Returns an empty list
 * both for entries without xattrs (NULL column) and, with a debug log
 * message, for blobs that fail to deserialize.
 */
XattrList SqlLookupXattrs::GetXattrs() {
  const unsigned char *packed_xattrs =
    reinterpret_cast<const unsigned char *>(RetrieveBlob(0));
  if (packed_xattrs == NULL)
    return XattrList();

  int size = RetrieveBytes(0);
  assert(size >= 0);
  UniquePtr<XattrList> xattrs(XattrList::Deserialize(packed_xattrs, size));
  if (!xattrs.IsValid()) {
    // Corrupt data is tolerated: the entry simply appears to have no xattrs
    LogCvmfs(kLogCatalog, kLogDebug, "corrupted xattr data");
    return XattrList();
  }
  return *xattrs;
}
||
1346 |
|||
1347 |
✓✗✓✗ |
45 |
} // namespace catalog |
Generated by: GCOVR (Version 4.1) |