/**
 * This file is part of the CernVM File System
 */

#include "swissknife_history.h"


#include <algorithm>
#include <cassert>
#include <ctime>

#include "catalog_rw.h"
#include "crypto/hash.h"
#include "crypto/signature.h"
#include "manifest_fetch.h"
#include "network/download.h"
#include "upload.h"

using namespace std;  // NOLINT
using namespace swissknife;  // NOLINT

const std::string CommandTag::kHeadTag = "trunk";
const std::string CommandTag::kPreviousHeadTag = "trunk-previous";

const std::string CommandTag::kHeadTagDescription = "current HEAD";
const std::string CommandTag::kPreviousHeadTagDescription =
    "default undo target";

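// Registers the command line parameters that all tag-related swissknife
// commands share.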
static void InsertCommonParameters(ParameterList *r) {
  r->push_back(Parameter::Mandatory('w', "repository directory / url"));
  r->push_back(Parameter::Mandatory('t', "temporary scratch directory"));
  r->push_back(Parameter::Optional('p', "public key of the repository"));
  r->push_back(Parameter::Optional('f', "fully qualified repository name"));
  r->push_back(Parameter::Optional('r', "spooler definition string"));
  r->push_back(Parameter::Optional('m', "(unsigned) manifest file to edit"));
  r->push_back(Parameter::Optional('b', "mounted repository base hash"));
  r->push_back(
      Parameter::Optional('e', "hash algorithm to use (default SHA1)"));
  r->push_back(Parameter::Switch('L', "follow HTTP redirects"));
  r->push_back(Parameter::Optional('P', "session_token_file"));
  r->push_back(Parameter::Optional('@', "proxy url"));
}

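// Parses the shared command line arguments, runs sanity checks and sets up the
// download manager, the signature manager (if a public key is given), the
// manifest, the history database and, for read-write commands, the upload
// spooler.  Returns NULL on failure; on success the caller takes ownership of
// the returned Environment.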
CommandTag::Environment *CommandTag::InitializeEnvironment(
    const ArgumentList &args, const bool read_write) {
  const string repository_url = MakeCanonicalPath(*args.find('w')->second);
  const string tmp_path = MakeCanonicalPath(*args.find('t')->second);
  const string spl_definition =
      (args.find('r') == args.end())
          ? ""
          : MakeCanonicalPath(*args.find('r')->second);
  const string manifest_path = (args.find('m') == args.end())
                                   ? ""
                                   : MakeCanonicalPath(*args.find('m')->second);
  const shash::Algorithms hash_algo =
      (args.find('e') == args.end())
          ? shash::kSha1
          : shash::ParseHashAlgorithm(*args.find('e')->second);
  const string pubkey_path = (args.find('p') == args.end())
                                 ? ""
                                 : MakeCanonicalPath(*args.find('p')->second);
  const shash::Any base_hash =
      (args.find('b') == args.end())
          ? shash::Any()
          : shash::MkFromHexPtr(shash::HexPtr(*args.find('b')->second),
                                shash::kSuffixCatalog);
  const string repo_name =
      (args.find('f') == args.end()) ? "" : *args.find('f')->second;

  string session_token_file;
  if (args.find('P') != args.end()) {
    session_token_file = *args.find('P')->second;
  }

  // Sanity checks
  if (hash_algo == shash::kAny) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to parse hash algorithm to use");
    return NULL;
  }

  if (read_write && spl_definition.empty()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "no upstream storage provided (-r)");
    return NULL;
  }

  if (read_write && manifest_path.empty()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "no (unsigned) manifest provided (-m)");
    return NULL;
  }

  if (!read_write && pubkey_path.empty()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "no public key provided (-p)");
    return NULL;
  }

  if (!read_write && repo_name.empty()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "no repository name provided (-f)");
    return NULL;
  }

  if (HasPrefix(spl_definition, "gw", false)) {
    if (session_token_file.empty()) {
      PrintError(
          "Session token file has to be provided "
          "when upstream type is gw.");
      return NULL;
    }
  }

  // create new environment
  // Note: We use this encapsulation because we cannot be sure that the
  // Command object gets deleted properly. With the Environment object at
  // hand we have full control and can make heavy and safe use of RAII
  UniquePtr<Environment> env(new Environment(repository_url, tmp_path));
  env->manifest_path.Set(manifest_path);
  env->history_path.Set(CreateTempPath(tmp_path + "/history", 0600));

  // initialize the (swissknife global) download manager
  const bool follow_redirects = (args.count('L') > 0);
  const std::string &proxy = (args.count('@') > 0) ?
      *args.find('@')->second : "";
  if (!this->InitDownloadManager(follow_redirects, proxy)) {
    return NULL;
  }

  // initialize the (swissknife global) signature manager (if possible)
  if (!pubkey_path.empty() && !this->InitSignatureManager(pubkey_path)) {
    return NULL;
  }

  // open the (yet unsigned) manifest file if it is there, otherwise load the
  // latest manifest from the server
  env->manifest =
      (FileExists(env->manifest_path.path()))
          ? OpenLocalManifest(env->manifest_path.path())
          : FetchRemoteManifest(env->repository_url, repo_name, base_hash);

  if (!env->manifest.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to load manifest file");
    return NULL;
  }

  // figure out the hash of the history from the previous revision if needed
  if (read_write && env->manifest->history().IsNull() && !base_hash.IsNull()) {
    env->previous_manifest =
        FetchRemoteManifest(env->repository_url, repo_name, base_hash);
    if (!env->previous_manifest.IsValid()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to load previous manifest");
      return NULL;
    }

    LogCvmfs(kLogCvmfs, kLogDebug,
             "using history database '%s' from previous "
             "manifest (%s) as basis",
             env->previous_manifest->history().ToString().c_str(),
             env->previous_manifest->repository_name().c_str());
    env->manifest->set_history(env->previous_manifest->history());
    env->manifest->set_repository_name(
        env->previous_manifest->repository_name());
  }

  // download the history database referenced in the manifest
  env->history = GetHistory(env->manifest.weak_ref(), env->repository_url,
                            env->history_path.path(), read_write);
  if (!env->history.IsValid()) {
    return NULL;
  }

  // if the using Command is expected to change the history database, we need
  // to initialize the upload spooler for potential later history upload
  if (read_write) {
    const bool use_file_chunking = false;
    const bool generate_legacy_bulk_chunks = false;
    const upload::SpoolerDefinition sd(spl_definition, hash_algo,
                                       zlib::kZlibDefault,
                                       generate_legacy_bulk_chunks,
                                       use_file_chunking, 0, 0, 0,
                                       session_token_file);
    env->spooler = upload::Spooler::Construct(sd);
    if (!env->spooler.IsValid()) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to initialize upload spooler");
      return NULL;
    }
  }

  // return the pointer of the Environment (passing the ownership along)
  return env.Release();
}

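// Sets the previous revision pointer, closes the history database, uploads the
// database via the spooler and stores the resulting content hash in the
// (still unsigned) manifest, which is then re-exported.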
bool CommandTag::CloseAndPublishHistory(Environment *env) {
  assert(env->spooler.IsValid());

  // set the previous revision pointer of the history database
  env->history->SetPreviousRevision(env->manifest->history());

  // close the history database
  history::History *weak_history = env->history.Release();
  delete weak_history;

  // compress and upload the new history database
  Future<shash::Any> history_hash;
  upload::Spooler::CallbackPtr callback = env->spooler->RegisterListener(
      &CommandTag::UploadClosure, this, &history_hash);
  env->spooler->ProcessHistory(env->history_path.path());
  env->spooler->WaitForUpload();
  const shash::Any new_history_hash = history_hash.Get();
  env->spooler->UnregisterListener(callback);

  // retrieve the (async) uploader result
  if (new_history_hash.IsNull()) {
    return false;
  }

  // update the (yet unsigned) manifest file
  env->manifest->set_history(new_history_hash);
  if (!env->manifest->Export(env->manifest_path.path())) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to export the new manifest '%s'",
             env->manifest_path.path().c_str());
    return false;
  }

  // disable the unlink guard in order to keep the newly exported manifest file
  env->manifest_path.Disable();
  LogCvmfs(kLogCvmfs, kLogVerboseMsg,
           "exported manifest (%" PRIu64 ") with new history '%s'",
           env->manifest->revision(), new_history_hash.ToString().c_str());

  return true;
}


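// Takes ownership of the given writable catalog, transfers its TTL, revision
// and timestamp into the manifest, closes it and uploads it; on success the
// manifest references the new catalog hash and size.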
bool CommandTag::UploadCatalogAndUpdateManifest(
    CommandTag::Environment *env, catalog::WritableCatalog *catalog) {
  assert(env->spooler.IsValid());

  // gather information about catalog to be uploaded and update manifest
  UniquePtr<catalog::WritableCatalog> wr_catalog(catalog);
  const std::string catalog_path = wr_catalog->database_path();
  env->manifest->set_ttl(wr_catalog->GetTTL());
  env->manifest->set_revision(wr_catalog->GetRevision());
  env->manifest->set_publish_timestamp(wr_catalog->GetLastModified());

  // close the catalog
  catalog::WritableCatalog *weak_catalog = wr_catalog.Release();
  delete weak_catalog;

  // upload the catalog
  Future<shash::Any> catalog_hash;
  upload::Spooler::CallbackPtr callback = env->spooler->RegisterListener(
      &CommandTag::UploadClosure, this, &catalog_hash);
  env->spooler->ProcessCatalog(catalog_path);
  env->spooler->WaitForUpload();
  const shash::Any new_catalog_hash = catalog_hash.Get();
  env->spooler->UnregisterListener(callback);

  // check if the upload succeeded
  if (new_catalog_hash.IsNull()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to upload catalog '%s'",
             catalog_path.c_str());
    return false;
  }

  // update the catalog size and hash in the manifest
  const size_t catalog_size = GetFileSize(catalog_path);
  env->manifest->set_catalog_size(catalog_size);
  env->manifest->set_catalog_hash(new_catalog_hash);

  LogCvmfs(kLogCvmfs, kLogVerboseMsg, "uploaded new catalog (%lu bytes) '%s'",
           catalog_size, new_catalog_hash.ToString().c_str());

  return true;
}

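// Spooler callback: passes the content hash of a finished upload (or a null
// hash on error) on to the waiting Future.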
void CommandTag::UploadClosure(const upload::SpoolerResult &result,
                               Future<shash::Any> *hash) {
  assert(!result.IsChunked());
  if (result.return_code != 0) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to upload history database (%d)",
             result.return_code);
    hash->Set(shash::Any());
  } else {
    hash->Set(result.content_hash);
  }
}

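// Maintains the magic undo tags: the current 'trunk' tag becomes
// 'trunk-previous' (unless this is an undo rollback) and the given template
// becomes the new 'trunk'.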
bool CommandTag::UpdateUndoTags(
    Environment *env, const history::History::Tag &current_head_template,
    const bool undo_rollback) {
  assert(env->history.IsValid());

  history::History::Tag current_head;
  history::History::Tag current_old_head;

  // remove previous HEAD tag
  if (!env->history->Remove(CommandTag::kPreviousHeadTag)) {
    LogCvmfs(kLogCvmfs, kLogVerboseMsg, "didn't find a previous HEAD tag");
  }

  // check if we have a current HEAD tag that needs to be renamed to
  // previous HEAD
  if (env->history->GetByName(CommandTag::kHeadTag, &current_head)) {
    // remove current HEAD tag
    if (!env->history->Remove(CommandTag::kHeadTag)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to remove current HEAD tag");
      return false;
    }

    // set previous HEAD tag where current HEAD used to be
    if (!undo_rollback) {
      current_old_head = current_head;
      current_old_head.name = CommandTag::kPreviousHeadTag;
      current_old_head.description = CommandTag::kPreviousHeadTagDescription;
      if (!env->history->Insert(current_old_head)) {
        LogCvmfs(kLogCvmfs, kLogStderr, "failed to set previous HEAD tag");
        return false;
      }
    }
  }

  // set the current HEAD to the catalog provided by the template HEAD
  current_head = current_head_template;
  current_head.name = CommandTag::kHeadTag;
  current_head.description = CommandTag::kHeadTagDescription;
  if (!env->history->Insert(current_head)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to set new current HEAD");
    return false;
  }

  return true;
}

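// Downloads a content-addressed object from the repository's data store into
// destination_path.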
bool CommandTag::FetchObject(const std::string &repository_url,
                             const shash::Any &object_hash,
                             const std::string &destination_path) const {
  assert(!object_hash.IsNull());

  download::Failures dl_retval;
  const std::string url = repository_url + "/data/" + object_hash.MakePath();

  cvmfs::PathSink pathsink(destination_path);
  download::JobInfo download_object(&url, true, false, &object_hash, &pathsink);
  dl_retval = download_manager()->Fetch(&download_object);

  if (dl_retval != download::kFailOk) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to download object '%s' (%d - %s)",
             object_hash.ToStringWithSuffix().c_str(), dl_retval,
             download::Code2Ascii(dl_retval));
    return false;
  }

  return true;
}

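// Returns the tag database to work on: creates a fresh one if the manifest
// does not reference a history yet, otherwise downloads the referenced
// database and opens it read-only or writable as requested.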
history::History *CommandTag::GetHistory(const manifest::Manifest *manifest,
                                         const std::string &repository_url,
                                         const std::string &history_path,
                                         const bool read_write) const {
  const shash::Any history_hash = manifest->history();
  history::History *history;

  if (history_hash.IsNull()) {
    history = history::SqliteHistory::Create(history_path,
                                             manifest->repository_name());
    if (NULL == history) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to create history database");
      return NULL;
    }
  } else {
    if (!FetchObject(repository_url, history_hash, history_path)) {
      return NULL;
    }

    history = (read_write) ? history::SqliteHistory::OpenWritable(history_path)
                           : history::SqliteHistory::Open(history_path);
    if (NULL == history) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to open history database (%s)",
               history_path.c_str());
      unlink(history_path.c_str());
      return NULL;
    }

    assert(history->fqrn() == manifest->repository_name());
  }

  return history;
}

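// Downloads the catalog with the given hash and attaches it, either read-only
// or as a writable catalog.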
catalog::Catalog *CommandTag::GetCatalog(const std::string &repository_url,
                                         const shash::Any &catalog_hash,
                                         const std::string catalog_path,
                                         const bool read_write) const {
  assert(shash::kSuffixCatalog == catalog_hash.suffix);
  if (!FetchObject(repository_url, catalog_hash, catalog_path)) {
    return NULL;
  }

  const std::string catalog_root_path = "";
  return (read_write) ? catalog::WritableCatalog::AttachFreely(
                            catalog_root_path, catalog_path, catalog_hash)
                      : catalog::Catalog::AttachFreely(
                            catalog_root_path, catalog_path, catalog_hash);
}

void CommandTag::PrintTagMachineReadable(
    const history::History::Tag &tag) const {
  LogCvmfs(kLogCvmfs, kLogStdout, "%s %s %" PRIu64 " %" PRIu64 " %ld %s %s",
           tag.name.c_str(),
           tag.root_hash.ToString().c_str(), tag.size, tag.revision,
           tag.timestamp,
           (tag.branch == "") ? "(default)" : tag.branch.c_str(),
           tag.description.c_str());
}

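// Pads str with fill_char up to the given width, left- or right-aligned
// (used for the tabular tag listing).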
std::string CommandTag::AddPadding(const std::string &str, const size_t padding,
                                   const bool align_right,
                                   const std::string &fill_char) const {
  assert(str.size() <= padding);
  std::string result(str);
  result.reserve(padding);
  const size_t pos = (align_right) ? 0 : str.size();
  const size_t padding_width = padding - str.size();
  for (size_t i = 0; i < padding_width; ++i) result.insert(pos, fill_char);
  return result;
}

bool CommandTag::IsUndoTagName(const std::string &tag_name) const {
  return tag_name == CommandTag::kHeadTag ||
         tag_name == CommandTag::kPreviousHeadTag;
}

//------------------------------------------------------------------------------

ParameterList CommandEditTag::GetParams() const {
  ParameterList r;
  InsertCommonParameters(&r);

  r.push_back(Parameter::Optional('d', "space separated tags to be deleted"));
  r.push_back(Parameter::Optional('a', "name of the new tag"));
  r.push_back(Parameter::Optional('D', "description of the tag"));
  r.push_back(Parameter::Optional('B', "branch of the new tag"));
  r.push_back(Parameter::Optional('P', "predecessor branch"));
  r.push_back(Parameter::Optional('h', "root hash of the new tag"));
  r.push_back(Parameter::Switch('x', "maintain undo tags"));
  return r;
}

int CommandEditTag::Main(const ArgumentList &args) {
  if ((args.find('d') == args.end()) && (args.find('a') == args.end()) &&
      (args.find('x') == args.end())) {
    LogCvmfs(kLogCvmfs, kLogStderr, "nothing to do");
    return 1;
  }

  // initialize the Environment (taking ownership)
  const bool history_read_write = true;
  UniquePtr<Environment> env(InitializeEnvironment(args, history_read_write));
  if (!env.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to init environment");
    return 1;
  }

  int retval;
  if (args.find('d') != args.end()) {
    retval = RemoveTags(args, env.weak_ref());
    if (retval != 0) return retval;
  }
  if ((args.find('a') != args.end()) || (args.find('x') != args.end())) {
    retval = AddNewTag(args, env.weak_ref());
    if (retval != 0) return retval;
  }

  // finalize processing and upload new history database
  if (!CloseAndPublishHistory(env.weak_ref())) {
    return 1;
  }
  return 0;
}

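// Creates or moves the tag described on the command line and, if requested,
// maintains the undo tags along with it.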
int CommandEditTag::AddNewTag(const ArgumentList &args, Environment *env) {
  const std::string tag_name =
      (args.find('a') != args.end()) ? *args.find('a')->second : "";
  const std::string tag_description =
      (args.find('D') != args.end()) ? *args.find('D')->second : "";
  const bool undo_tags = (args.find('x') != args.end());
  const std::string root_hash_string =
      (args.find('h') != args.end()) ? *args.find('h')->second : "";
  const std::string branch_name =
      (args.find('B') != args.end()) ? *args.find('B')->second : "";
  const std::string previous_branch_name =
      (args.find('P') != args.end()) ? *args.find('P')->second : "";

  if (tag_name.find(" ") != std::string::npos) {
    LogCvmfs(kLogCvmfs, kLogStderr, "tag names must not contain spaces");
    return 1;
  }

  assert(!tag_name.empty() || undo_tags);

  if (IsUndoTagName(tag_name)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "undo tags are managed internally");
    return 1;
  }

  // set the root hash to be tagged to the current HEAD if no other hash was
  // given by the user
  shash::Any root_hash = GetTagRootHash(env, root_hash_string);
  if (root_hash.IsNull()) {
    return 1;
  }

  // open the catalog to be tagged (to check for existence and for meta info)
  const UnlinkGuard catalog_path(
      CreateTempPath(env->tmp_path + "/catalog", 0600));
  const bool catalog_read_write = false;
  const UniquePtr<catalog::Catalog> catalog(GetCatalog(
      env->repository_url, root_hash, catalog_path.path(), catalog_read_write));
  if (!catalog.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "catalog with hash '%s' does not exist",
             root_hash.ToString().c_str());
    return 1;
  }

  // check if the catalog is a root catalog
  if (!catalog->root_prefix().IsEmpty()) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "cannot tag catalog '%s' that is not a "
             "root catalog.",
             root_hash.ToString().c_str());
    return 1;
  }

  // create a template for the new tag to be created, moved or used as undo tag
  history::History::Tag tag_template;
  tag_template.name = "<template>";
  tag_template.root_hash = root_hash;
  tag_template.size = GetFileSize(catalog_path.path());
  tag_template.revision = catalog->GetRevision();
  tag_template.timestamp = catalog->GetLastModified();
  tag_template.branch = branch_name;
  tag_template.description = tag_description;

  // manipulate the tag database by creating a new tag or moving an existing one
  if (!tag_name.empty()) {
    tag_template.name = tag_name;
    const bool user_provided_hash = (!root_hash_string.empty());

    if (!env->history->ExistsBranch(tag_template.branch)) {
      history::History::Branch branch(
          tag_template.branch,
          previous_branch_name,
          tag_template.revision);
      if (!env->history->InsertBranch(branch)) {
        LogCvmfs(kLogCvmfs, kLogStderr, "cannot insert branch '%s'",
                 tag_template.branch.c_str());
        return 1;
      }
    }

    if (!ManipulateTag(env, tag_template, user_provided_hash)) {
      return 1;
    }
  }

  // handle undo tags ('trunk' and 'trunk-previous') if necessary
  if (undo_tags && !UpdateUndoTags(env, tag_template)) {
    return 1;
  }

  return 0;
}

shash::Any CommandEditTag::GetTagRootHash(
    Environment *env, const std::string &root_hash_string) const {
  shash::Any root_hash;

  if (root_hash_string.empty()) {
    LogCvmfs(kLogCvmfs, kLogVerboseMsg,
             "no catalog hash provided, using hash "
             "of current HEAD catalog (%s)",
             env->manifest->catalog_hash().ToString().c_str());
    root_hash = env->manifest->catalog_hash();
  } else {
    root_hash = shash::MkFromHexPtr(shash::HexPtr(root_hash_string),
                                    shash::kSuffixCatalog);
    if (root_hash.IsNull()) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "failed to read provided catalog hash '%s'",
               root_hash_string.c_str());
    }
  }

  return root_hash;
}

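// Creates the tag if it does not exist yet; an existing tag is only moved if
// the user explicitly provided a root hash.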
bool CommandEditTag::ManipulateTag(Environment *env,
                                   const history::History::Tag &tag_template,
                                   const bool user_provided_hash) {
  const std::string &tag_name = tag_template.name;

  // check if the tag already exists, otherwise create it and return
  if (!env->history->Exists(tag_name)) {
    return CreateTag(env, tag_template);
  }

  // tag does exist already, now we need to see if we can move it
  if (!user_provided_hash) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "a tag with the name '%s' already exists. Do you want to move it? "
             "(-h <root hash>)",
             tag_name.c_str());
    return false;
  }

  // move the already existing tag and return
  return MoveTag(env, tag_template);
}

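// Re-points an existing tag to a new root hash, keeping its branch and, if no
// new description was given, its description.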
bool CommandEditTag::MoveTag(Environment *env,
                             const history::History::Tag &tag_template)
{
  const std::string &tag_name = tag_template.name;
  history::History::Tag new_tag = tag_template;

  // get the already existent tag
  history::History::Tag old_tag;
  if (!env->history->GetByName(tag_name, &old_tag)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to retrieve tag '%s' for moving",
             tag_name.c_str());
    return false;
  }

  // check if we would move the tag to the same hash
  if (old_tag.root_hash == new_tag.root_hash) {
    LogCvmfs(kLogCvmfs, kLogStderr, "tag '%s' already points to '%s'",
             tag_name.c_str(), old_tag.root_hash.ToString().c_str());
    return false;
  }

  // copy over old description if no new description was given
  if (new_tag.description.empty()) {
    new_tag.description = old_tag.description;
  }
  new_tag.branch = old_tag.branch;

  // remove the old tag from the database
  if (!env->history->Remove(tag_name)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "removing old tag '%s' before move failed",
             tag_name.c_str());
    return false;
  }
  if (!env->history->PruneBranches()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "could not prune unused branches");
    return false;
  }
  bool retval = env->history->Vacuum();
  assert(retval);

  LogCvmfs(kLogCvmfs, kLogStdout, "moving tag '%s' from '%s' to '%s'",
           tag_name.c_str(), old_tag.root_hash.ToString().c_str(),
           tag_template.root_hash.ToString().c_str());

  // re-create the moved tag
  return CreateTag(env, new_tag);
}

bool CommandEditTag::CreateTag(Environment *env,
                               const history::History::Tag &new_tag) {
  if (!env->history->Insert(new_tag)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to insert new tag '%s'",
             new_tag.name.c_str());
    return false;
  }

  return true;
}

int CommandEditTag::RemoveTags(const ArgumentList &args, Environment *env) {
  typedef std::vector<std::string> TagNames;
  const std::string tags_to_delete = *args.find('d')->second;

  const TagNames condemned_tags = SplitString(tags_to_delete, ' ');

  // check if user tries to remove a magic undo tag
  TagNames::const_iterator i = condemned_tags.begin();
  const TagNames::const_iterator iend = condemned_tags.end();
  for (; i != iend; ++i) {
    if (IsUndoTagName(*i)) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "undo tags are handled internally and cannot be deleted");
      return 1;
    }
  }

  LogCvmfs(kLogCvmfs, kLogDebug, "proceeding to delete %lu tags",
           condemned_tags.size());

  // check if the tags to be deleted exist
  bool all_exist = true;
  for (i = condemned_tags.begin(); i != iend; ++i) {
    if (!env->history->Exists(*i)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "tag '%s' does not exist", i->c_str());
      all_exist = false;
    }
  }
  if (!all_exist) {
    return 1;
  }

  // delete the tags from the tag database and print their root hashes
  i = condemned_tags.begin();
  env->history->BeginTransaction();
  for (; i != iend; ++i) {
    // print some information about the tag to be deleted
    history::History::Tag condemned_tag;
    const bool found_tag = env->history->GetByName(*i, &condemned_tag);
    assert(found_tag);
    LogCvmfs(kLogCvmfs, kLogStdout, "deleting '%s' (%s)",
             condemned_tag.name.c_str(),
             condemned_tag.root_hash.ToString().c_str());

    // remove the tag
    if (!env->history->Remove(*i)) {
      LogCvmfs(kLogCvmfs, kLogStderr, "failed to remove tag '%s' from history",
               i->c_str());
      return 1;
    }
  }
  bool retval = env->history->PruneBranches();
  if (!retval) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to prune unused branches from history");
    return 1;
  }
  env->history->CommitTransaction();
  retval = env->history->Vacuum();
  assert(retval);

  return 0;
}

//------------------------------------------------------------------------------


ParameterList CommandListTags::GetParams() const {
  ParameterList r;
  InsertCommonParameters(&r);
  r.push_back(Parameter::Switch('x', "machine readable output"));
  r.push_back(Parameter::Switch('B', "print branch hierarchy"));
  return r;
}

void CommandListTags::PrintHumanReadableTagList(
    const CommandListTags::TagList &tags) const {
  // go through the list of tags and figure out the column widths
  const std::string name_label = "Name";
  const std::string rev_label = "Revision";
  const std::string time_label = "Timestamp";
  const std::string branch_label = "Branch";
  const std::string desc_label = "Description";

  // figure out the maximal lengths of the fields in the lists
  TagList::const_reverse_iterator i = tags.rbegin();
  const TagList::const_reverse_iterator iend = tags.rend();
  size_t max_name_len = name_label.size();
  size_t max_rev_len = rev_label.size();
  size_t max_time_len = desc_label.size();
  size_t max_branch_len = branch_label.size();
  for (; i != iend; ++i) {
    max_name_len = std::max(max_name_len, i->name.size());
    max_rev_len = std::max(max_rev_len, StringifyInt(i->revision).size());
    max_time_len =
        std::max(max_time_len, StringifyTime(i->timestamp, true).size());
    max_branch_len = std::max(max_branch_len, i->branch.size());
  }

  // print the list header
  LogCvmfs(kLogCvmfs, kLogStdout,
           "%s \u2502 %s \u2502 %s \u2502 %s \u2502 %s",
           AddPadding(name_label, max_name_len).c_str(),
           AddPadding(rev_label, max_rev_len).c_str(),
           AddPadding(time_label, max_time_len).c_str(),
           AddPadding(branch_label, max_branch_len).c_str(),
           desc_label.c_str());
  LogCvmfs(kLogCvmfs, kLogStdout,
           "%s\u2500\u253C\u2500%s\u2500\u253C\u2500%s"
           "\u2500\u253C\u2500%s\u2500\u253C\u2500%s",
           AddPadding("", max_name_len, false, "\u2500").c_str(),
           AddPadding("", max_rev_len, false, "\u2500").c_str(),
           AddPadding("", max_time_len, false, "\u2500").c_str(),
           AddPadding("", max_branch_len, false, "\u2500").c_str(),
           AddPadding("", desc_label.size() + 1, false, "\u2500").c_str());

  // print the rows of the list
  i = tags.rbegin();
  for (; i != iend; ++i) {
    LogCvmfs(
        kLogCvmfs, kLogStdout,
        "%s \u2502 %s \u2502 %s \u2502 %s \u2502 %s",
        AddPadding(i->name, max_name_len).c_str(),
        AddPadding(StringifyInt(i->revision), max_rev_len, true).c_str(),
        AddPadding(StringifyTime(i->timestamp, true), max_time_len).c_str(),
        AddPadding(i->branch, max_branch_len).c_str(),
        i->description.c_str());
  }

  // print the list footer
  LogCvmfs(kLogCvmfs, kLogStdout,
           "%s\u2500\u2534\u2500%s\u2500\u2534\u2500%s"
           "\u2500\u2534\u2500%s\u2500\u2534\u2500%s",
           AddPadding("", max_name_len, false, "\u2500").c_str(),
           AddPadding("", max_rev_len, false, "\u2500").c_str(),
           AddPadding("", max_time_len, false, "\u2500").c_str(),
           AddPadding("", max_branch_len, false, "\u2500").c_str(),
           AddPadding("", desc_label.size() + 1, false, "\u2500").c_str());

  // print the number of tags listed
  LogCvmfs(kLogCvmfs, kLogStdout, "listing contains %lu tags", tags.size());
}

void CommandListTags::PrintMachineReadableTagList(const TagList &tags) const {
  TagList::const_iterator i = tags.begin();
  const TagList::const_iterator iend = tags.end();
  for (; i != iend; ++i) {
    PrintTagMachineReadable(*i);
  }
}


void CommandListTags::PrintHumanReadableBranchList(
    const BranchHierarchy &branches) const
{
  unsigned N = branches.size();
  for (unsigned i = 0; i < N; ++i) {
    for (unsigned l = 0; l < branches[i].level; ++l) {
      LogCvmfs(kLogCvmfs, kLogStdout | kLogNoLinebreak, "%s",
               ((l + 1) == branches[i].level) ? "\u251c " : "\u2502 ");
    }
    LogCvmfs(kLogCvmfs, kLogStdout, "%s @%" PRIu64,
             branches[i].branch.branch.c_str(),
             branches[i].branch.initial_revision);
  }
}


void CommandListTags::PrintMachineReadableBranchList(
    const BranchHierarchy &branches) const
{
  unsigned N = branches.size();
  for (unsigned i = 0; i < N; ++i) {
    LogCvmfs(kLogCvmfs, kLogStdout, "[%u] %s%s @%" PRIu64,
             branches[i].level,
             AddPadding("", branches[i].level, false, " ").c_str(),
             branches[i].branch.branch.c_str(),
             branches[i].branch.initial_revision);
  }
}


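// Depth-first traversal of the branch list: appends all children of
// parent_branch to the hierarchy at the given indentation level.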
void CommandListTags::SortBranchesRecursively(
    unsigned level,
    const string &parent_branch,
    const BranchList &branches,
    BranchHierarchy *hierarchy) const
{
  // For large numbers of branches, this should be turned into the O(n) version
  // using a linked list
  unsigned N = branches.size();
  for (unsigned i = 0; i < N; ++i) {
    if (branches[i].branch == "")
      continue;
    if (branches[i].parent == parent_branch) {
      hierarchy->push_back(BranchLevel(branches[i], level));
      SortBranchesRecursively(
          level + 1, branches[i].branch, branches, hierarchy);
    }
  }
}


CommandListTags::BranchHierarchy CommandListTags::SortBranches(
    const BranchList &branches) const
{
  BranchHierarchy hierarchy;
  hierarchy.push_back(
      BranchLevel(history::History::Branch("(default)", "", 0), 0));
  SortBranchesRecursively(1, "", branches, &hierarchy);
  return hierarchy;
}


int CommandListTags::Main(const ArgumentList &args) {
  const bool machine_readable = (args.find('x') != args.end());
  const bool branch_hierarchy = (args.find('B') != args.end());

  // initialize the Environment (taking ownership)
  const bool history_read_write = false;
  UniquePtr<Environment> env(InitializeEnvironment(args, history_read_write));
  if (!env.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to init environment");
    return 1;
  }

  if (branch_hierarchy) {
    BranchList branch_list;
    if (!env->history->ListBranches(&branch_list)) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "failed to list branches in history database");
      return 1;
    }
    BranchHierarchy branch_hierarchy = SortBranches(branch_list);

    if (machine_readable) {
      PrintMachineReadableBranchList(branch_hierarchy);
    } else {
      PrintHumanReadableBranchList(branch_hierarchy);
    }
  } else {
    // obtain a full list of all tags
    TagList tags;
    if (!env->history->List(&tags)) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "failed to list tags in history database");
      return 1;
    }

    if (machine_readable) {
      PrintMachineReadableTagList(tags);
    } else {
      PrintHumanReadableTagList(tags);
    }
  }

  return 0;
}

//------------------------------------------------------------------------------

ParameterList CommandInfoTag::GetParams() const {
  ParameterList r;
  InsertCommonParameters(&r);

  r.push_back(Parameter::Mandatory('n', "name of the tag to be inspected"));
  r.push_back(Parameter::Switch('x', "machine readable output"));
  return r;
}

std::string CommandInfoTag::HumanReadableFilesize(const size_t filesize) const {
  const size_t kiB = 1024;
  const size_t MiB = kiB * 1024;
  const size_t GiB = MiB * 1024;

  if (filesize > GiB) {
    return StringifyDouble(static_cast<double>(filesize) / GiB) + " GiB";
  } else if (filesize > MiB) {
    return StringifyDouble(static_cast<double>(filesize) / MiB) + " MiB";
  } else if (filesize > kiB) {
    return StringifyDouble(static_cast<double>(filesize) / kiB) + " kiB";
  } else {
    return StringifyInt(filesize) + " Byte";
  }
}

void CommandInfoTag::PrintHumanReadableInfo(
    const history::History::Tag &tag) const {
  LogCvmfs(kLogCvmfs, kLogStdout,
           "Name: %s\n"
           "Revision: %" PRIu64 "\n"
           "Timestamp: %s\n"
           "Branch: %s\n"
           "Root Hash: %s\n"
           "Catalog Size: %s\n"
           "%s",
           tag.name.c_str(), tag.revision,
           StringifyTime(tag.timestamp, true /* utc */).c_str(),
           tag.branch.c_str(),
           tag.root_hash.ToString().c_str(),
           HumanReadableFilesize(tag.size).c_str(),
           tag.description.c_str());
}

int CommandInfoTag::Main(const ArgumentList &args) {
  const std::string tag_name = *args.find('n')->second;
  const bool machine_readable = (args.find('x') != args.end());

  // initialize the Environment (taking ownership)
  const bool history_read_write = false;
  UniquePtr<Environment> env(InitializeEnvironment(args, history_read_write));
  if (!env.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to init environment");
    return 1;
  }

  history::History::Tag tag;
  const bool found = env->history->GetByName(tag_name, &tag);
  if (!found) {
    LogCvmfs(kLogCvmfs, kLogStderr, "tag '%s' does not exist",
             tag_name.c_str());
    return 1;
  }

  if (machine_readable) {
    PrintTagMachineReadable(tag);
  } else {
    PrintHumanReadableInfo(tag);
  }

  return 0;
}

//------------------------------------------------------------------------------

ParameterList CommandRollbackTag::GetParams() const {
  ParameterList r;
  InsertCommonParameters(&r);

  r.push_back(Parameter::Optional('n', "name of the tag to be republished"));
  return r;
}

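// Rolls the repository back to a previously tagged revision: republishes the
// tagged root catalog with a bumped revision number, rolls back the tag
// database and refreshes the undo tags.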
int CommandRollbackTag::Main(const ArgumentList &args) {
  const bool undo_rollback = (args.find('n') == args.end());
  const std::string tag_name =
      (!undo_rollback) ? *args.find('n')->second : CommandTag::kPreviousHeadTag;

  // initialize the Environment (taking ownership)
  const bool history_read_write = true;
  UniquePtr<Environment> env(InitializeEnvironment(args, history_read_write));
  if (!env.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to init environment");
    return 1;
  }

  // find tag to be rolled back to
  history::History::Tag target_tag;
  const bool found = env->history->GetByName(tag_name, &target_tag);
  if (!found) {
    if (undo_rollback) {
      LogCvmfs(kLogCvmfs, kLogStderr,
               "only one anonymous rollback supported - "
               "perhaps you want to provide a tag name?");
    } else {
      LogCvmfs(kLogCvmfs, kLogStderr, "tag '%s' does not exist",
               tag_name.c_str());
    }
    return 1;
  }
  if (target_tag.branch != "") {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "rollback is only supported on the default branch");
    return 1;
  }

  // list the tags that will be deleted
  TagList affected_tags;
  if (!env->history->ListTagsAffectedByRollback(tag_name, &affected_tags)) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "failed to list condemned tags prior to rollback to '%s'",
             tag_name.c_str());
    return 1;
  }

  // check if tag is valid to be rolled back to
  const uint64_t current_revision = env->manifest->revision();
  assert(target_tag.revision <= current_revision);
  if (target_tag.revision == current_revision) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "not rolling back to current head (%" PRIu64 ")",
             current_revision);
    return 1;
  }

  // open the catalog to be rolled back to
  const UnlinkGuard catalog_path(
      CreateTempPath(env->tmp_path + "/catalog", 0600));
  const bool catalog_read_write = true;
  UniquePtr<catalog::WritableCatalog> catalog(
      dynamic_cast<catalog::WritableCatalog *>(
          GetCatalog(env->repository_url, target_tag.root_hash,
                     catalog_path.path(), catalog_read_write)));
  if (!catalog.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to open catalog with hash '%s'",
             target_tag.root_hash.ToString().c_str());
    return 1;
  }

  // check if the catalog has a supported schema version
  if (catalog->schema() < catalog::CatalogDatabase::kLatestSupportedSchema -
                              catalog::CatalogDatabase::kSchemaEpsilon) {
    LogCvmfs(kLogCvmfs, kLogStderr,
             "not rolling back to outdated and "
             "incompatible catalog schema (%.1f < %.1f)",
             catalog->schema(),
             catalog::CatalogDatabase::kLatestSupportedSchema);
    return 1;
  }

  // update the catalog to be republished
  catalog->Transaction();
  catalog->UpdateLastModified();
  catalog->SetRevision(current_revision + 1);
  catalog->SetPreviousRevision(env->manifest->catalog_hash());
  catalog->Commit();

  // Upload catalog (handing over ownership of catalog pointer)
  if (!UploadCatalogAndUpdateManifest(env.weak_ref(), catalog.Release())) {
    LogCvmfs(kLogCvmfs, kLogStderr, "catalog upload failed");
    return 1;
  }

  // update target tag with newly published root catalog information
  history::History::Tag updated_target_tag(target_tag);
  updated_target_tag.root_hash = env->manifest->catalog_hash();
  updated_target_tag.size = env->manifest->catalog_size();
  updated_target_tag.revision = env->manifest->revision();
  updated_target_tag.timestamp = env->manifest->publish_timestamp();
  if (!env->history->Rollback(updated_target_tag)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to rollback history to '%s'",
             updated_target_tag.name.c_str());
    return 1;
  }
  bool retval = env->history->Vacuum();
  assert(retval);

  // set the magic undo tags
  if (!UpdateUndoTags(env.weak_ref(), updated_target_tag, undo_rollback)) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to update magic undo tags");
    return 1;
  }

  // finalize the history and upload it
  if (!CloseAndPublishHistory(env.weak_ref())) {
    return 1;
  }

  // print the tags that have been removed by the rollback
  PrintDeletedTagList(affected_tags);

  return 0;
}

void CommandRollbackTag::PrintDeletedTagList(const TagList &tags) const {
  size_t longest_name = 0;
  TagList::const_iterator i = tags.begin();
  const TagList::const_iterator iend = tags.end();
  for (; i != iend; ++i) {
    longest_name = std::max(i->name.size(), longest_name);
  }

  i = tags.begin();
  for (; i != iend; ++i) {
    LogCvmfs(kLogCvmfs, kLogStdout, "removed tag %s (%s)",
             AddPadding(i->name, longest_name).c_str(),
             i->root_hash.ToString().c_str());
  }
}

//------------------------------------------------------------------------------

ParameterList CommandEmptyRecycleBin::GetParams() const {
  ParameterList r;
  InsertCommonParameters(&r);
  return r;
}

int CommandEmptyRecycleBin::Main(const ArgumentList &args) {
  // initialize the Environment (taking ownership)
  const bool history_read_write = true;
  UniquePtr<Environment> env(InitializeEnvironment(args, history_read_write));
  if (!env.IsValid()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to init environment");
    return 1;
  }

  if (!env->history->EmptyRecycleBin()) {
    LogCvmfs(kLogCvmfs, kLogStderr, "failed to empty recycle bin");
    return 1;
  }

  // finalize the history and upload it
  if (!CloseAndPublishHistory(env.weak_ref())) {
    return 1;
  }

  return 0;
}