GCC Code Coverage Report


Directory: cvmfs/
File: cvmfs/compat.cc
Date: 2025-07-06 02:35:01
            Exec   Total   Coverage
Lines:         0     140       0.0%
Branches:      0     124       0.0%

Line Branch Exec Source
1 /**
2 * This file is part of the CernVM File System.
3 */
4
5
6 #include "compat.h"
7
8 #include <cstdlib>
9 #include <cstring>
10
11 #include "crypto/hash.h"
12
13 using namespace std; // NOLINT
14
15 namespace compat {
16
17 namespace shash_v1 {
18
19 const char *kSuffixes[] = {"", "", "-rmd160", ""};
20
21 Md5::Md5(const char *chars, const unsigned length) {
22 ::shash::Md5 new_md5(chars, length);
23
24 algorithm = kMd5;
25 memcpy(digest, new_md5.digest, kDigestSizes[kMd5]);
26 }
27
28 void MigrateAny(const Any *old_hash, shash::Any *new_hash) {
29 memcpy(new_hash->digest, old_hash->digest, kDigestSizes[kAny]);
30 new_hash->algorithm = shash::Algorithms(old_hash->algorithm);
31 new_hash->suffix = shash::kSuffixNone;
32 }
33
34 } // namespace shash_v1
35
36
37 //------------------------------------------------------------------------------
38
39
40 namespace shash_v2 {
41
42 const char *kSuffixes[] = {"", "", "-rmd160", ""};
43
44 void MigrateAny(const Any *old_hash, shash::Any *new_hash) {
45 memcpy(new_hash->digest, old_hash->digest, kDigestSizes[kAny]);
46 new_hash->algorithm = shash::Algorithms(old_hash->algorithm);
47 new_hash->suffix = old_hash->suffix;
48 }
49
50 } // namespace shash_v2
51
52
53 //------------------------------------------------------------------------------
54
55
56 namespace inode_tracker {
57
58 bool InodeContainer::ConstructPath(const uint64_t inode, PathString *path) {
59 const InodeMap::const_iterator needle = map_.find(inode);
60 if (needle == map_.end())
61 return false;
62
63 if (needle->second.name.IsEmpty())
64 return true;
65
66 const bool retval = ConstructPath(needle->second.parent_inode, path);
67 path->Append("/", 1);
68 path->Append(needle->second.name.GetChars(), needle->second.name.GetLength());
69 assert(retval);
70 return retval;
71 }
72
73
74 InodeTracker::~InodeTracker() {
75 pthread_mutex_destroy(lock_);
76 free(lock_);
77 }
78
79 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker) {
80 InodeContainer::InodeMap::const_iterator i, iEnd;
81 i = old_tracker->inode2path_.map_.begin();
82 iEnd = old_tracker->inode2path_.map_.end();
83 for (; i != iEnd; ++i) {
84 const uint64_t inode = i->first;
85 const uint32_t references = i->second.references;
86 PathString path;
87 old_tracker->inode2path_.ConstructPath(inode, &path);
88 new_tracker->VfsGetBy(glue::InodeEx(inode, glue::InodeEx::kUnknownType),
89 references, path);
90 }
91 }
92
93 } // namespace inode_tracker
94
95
96 //------------------------------------------------------------------------------
97
98
99 namespace inode_tracker_v2 {
100
101 static uint32_t hasher_md5(const shash_v1::Md5 &key) {
102 return static_cast<uint32_t>(
103 *(reinterpret_cast<const uint32_t *>(key.digest) + 1));
104 }
105
106 static uint32_t hasher_inode(const uint64_t &inode) {
107 return MurmurHash2(&inode, sizeof(inode), 0x07387a4f);
108 }
109
110 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker) {
111 old_tracker->inode_map_.map_.hasher_ = hasher_inode;
112 old_tracker->path_map_.map_.hasher_ = hasher_md5;
113
114 SmallHashDynamic<uint64_t, uint32_t>
115 *old_inodes = &old_tracker->inode_references_.map_;
116 for (unsigned i = 0; i < old_inodes->capacity_; ++i) {
117 const uint64_t inode = old_inodes->keys_[i];
118 if (inode == 0)
119 continue;
120
121 const uint32_t references = old_inodes->values_[i];
122 PathString path;
123 const bool retval = old_tracker->FindPath(inode, &path);
124 assert(retval);
125 new_tracker->VfsGetBy(glue::InodeEx(inode, glue::InodeEx::kUnknownType),
126 references, path);
127 }
128 }
129
130 } // namespace inode_tracker_v2
131
132
133 //------------------------------------------------------------------------------
134
135
136 namespace inode_tracker_v3 {
137
138 static uint32_t hasher_md5(const shash_v1::Md5 &key) {
139 return static_cast<uint32_t>(
140 *(reinterpret_cast<const uint32_t *>(key.digest) + 1));
141 }
142
143 static uint32_t hasher_inode(const uint64_t &inode) {
144 return MurmurHash2(&inode, sizeof(inode), 0x07387a4f);
145 }
146
147 void Migrate(InodeTracker *old_tracker, glue::InodeTracker *new_tracker) {
148 old_tracker->inode_map_.map_.SetHasher(hasher_inode);
149 old_tracker->path_map_.map_.SetHasher(hasher_md5);
150 old_tracker->path_map_.path_store_.map_.SetHasher(hasher_md5);
151
152 SmallHashDynamic<uint64_t, uint32_t>
153 *old_inodes = &old_tracker->inode_references_.map_;
154 for (unsigned i = 0; i < old_inodes->capacity(); ++i) {
155 const uint64_t inode = old_inodes->keys()[i];
156 if (inode == 0)
157 continue;
158
159 const uint32_t references = old_inodes->values()[i];
160 PathString path;
161 const bool retval = old_tracker->FindPath(inode, &path);
162 assert(retval);
163 new_tracker->VfsGetBy(glue::InodeEx(inode, glue::InodeEx::kUnknownType),
164 references, path);
165 }
166 }
167
168 } // namespace inode_tracker_v3
169
170
171 //------------------------------------------------------------------------------
172
173
174 namespace chunk_tables {
175
176 ChunkTables::~ChunkTables() {
177 pthread_mutex_destroy(lock);
178 free(lock);
179 for (unsigned i = 0; i < kNumHandleLocks; ++i) {
180 pthread_mutex_destroy(handle_locks.At(i));
181 free(handle_locks.At(i));
182 }
183 }
184
185 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables) {
186 new_tables->next_handle = old_tables->next_handle;
187 new_tables->handle2fd = old_tables->handle2fd;
188 new_tables->inode2references = old_tables->inode2references;
189
190 SmallHashDynamic<uint64_t, FileChunkReflist>
191 *old_inode2chunks = &old_tables->inode2chunks;
192 for (unsigned keyno = 0; keyno < old_inode2chunks->capacity(); ++keyno) {
193 const uint64_t inode = old_inode2chunks->keys()[keyno];
194 if (inode == 0)
195 continue;
196
197 FileChunkReflist *old_reflist = &old_inode2chunks->values()[keyno];
198 BigVector<FileChunk> *old_list = old_reflist->list;
199 FileChunkList *new_list = new FileChunkList();
200 for (unsigned i = 0; i < old_list->size(); ++i) {
201 const FileChunk *old_chunk = old_list->AtPtr(i);
202 const off_t offset = old_chunk->offset();
203 const size_t size = old_chunk->size();
204 shash::Any hash;
205 shash_v1::MigrateAny(&old_chunk->content_hash_, &hash);
206 new_list->PushBack(::FileChunk(hash, offset, size));
207 }
208 delete old_list;
209 const ::FileChunkReflist new_reflist(new_list, old_reflist->path,
210 zlib::kZlibDefault, false);
211 new_tables->inode2chunks.Insert(inode, new_reflist);
212 }
213 }
214
215 } // namespace chunk_tables
216
217
218 //------------------------------------------------------------------------------
219
220
221 namespace chunk_tables_v2 {
222
223 ChunkTables::~ChunkTables() {
224 pthread_mutex_destroy(lock);
225 free(lock);
226 for (unsigned i = 0; i < kNumHandleLocks; ++i) {
227 pthread_mutex_destroy(handle_locks.At(i));
228 free(handle_locks.At(i));
229 }
230 }
231
232 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables) {
233 new_tables->next_handle = old_tables->next_handle;
234 new_tables->handle2fd = old_tables->handle2fd;
235 new_tables->inode2references = old_tables->inode2references;
236
237 SmallHashDynamic<uint64_t, FileChunkReflist>
238 *old_inode2chunks = &old_tables->inode2chunks;
239 for (unsigned keyno = 0; keyno < old_inode2chunks->capacity(); ++keyno) {
240 const uint64_t inode = old_inode2chunks->keys()[keyno];
241 if (inode == 0)
242 continue;
243
244 FileChunkReflist *old_reflist = &old_inode2chunks->values()[keyno];
245 BigVector<FileChunk> *old_list = old_reflist->list;
246 FileChunkList *new_list = new FileChunkList();
247 for (unsigned i = 0; i < old_list->size(); ++i) {
248 const FileChunk *old_chunk = old_list->AtPtr(i);
249 const off_t offset = old_chunk->offset();
250 const size_t size = old_chunk->size();
251 shash::Any hash;
252 shash_v2::MigrateAny(&old_chunk->content_hash_, &hash);
253 new_list->PushBack(::FileChunk(hash, offset, size));
254 }
255 delete old_list;
256 const ::FileChunkReflist new_reflist(new_list, old_reflist->path,
257 zlib::kZlibDefault, false);
258 new_tables->inode2chunks.Insert(inode, new_reflist);
259 }
260 }
261
262 } // namespace chunk_tables_v2
263
264
265 //------------------------------------------------------------------------------
266
267
268 namespace chunk_tables_v3 {
269
270 ChunkTables::~ChunkTables() {
271 pthread_mutex_destroy(lock);
272 free(lock);
273 for (unsigned i = 0; i < kNumHandleLocks; ++i) {
274 pthread_mutex_destroy(handle_locks.At(i));
275 free(handle_locks.At(i));
276 }
277 }
278
279 void Migrate(ChunkTables *old_tables, ::ChunkTables *new_tables) {
280 new_tables->next_handle = old_tables->next_handle;
281 new_tables->handle2fd = old_tables->handle2fd;
282 new_tables->inode2chunks = old_tables->inode2chunks;
283 new_tables->inode2references = old_tables->inode2references;
284 }
285
286 } // namespace chunk_tables_v3
287
288 } // namespace compat
289