/**
 * This file is part of the CernVM File System.
 *
 * A cache plugin that stores all data in a fixed-size memory chunk.
 */

#define __STDC_FORMAT_MACROS

#include <alloca.h>
#include <fcntl.h>
#include <inttypes.h>
#include <signal.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>

#include "cache_plugin/libcvmfs_cache.h"
#include "lru.h"
#include "malloc_heap.h"
#include "smallhash.h"
#include "util/concurrency.h"
#include "util/logging.h"
#include "util/murmur.hxx"
#include "util/platform.h"
#include "util/smalloc.h"
#include "util/string.h"

using namespace std;  // NOLINT

/**
 * Header of the data pieces in the cache. After the object header, the
 * zero-terminated description and the object data follow.
 */
struct ObjectHeader {
  ObjectHeader() {
    txn_id = uint64_t(-1);
    size_data = 0;
    size_desc = 0;
    refcnt = 0;
    type = CVMCACHE_OBJECT_REGULAR;
    memset(&id, 0, sizeof(id));
  }

  char *GetDescription() {
    if (size_desc == 0)
      return NULL;
    return reinterpret_cast<char *>(this) + sizeof(ObjectHeader);
  }

  void SetDescription(char *description) {
    if (description == NULL)
      return;
    memcpy(reinterpret_cast<char *>(this) + sizeof(ObjectHeader), description,
           strlen(description) + 1);
  }

  unsigned char *GetData() {
    return reinterpret_cast<unsigned char *>(this) + sizeof(ObjectHeader)
           + size_desc;
  }

  /**
   * Set during a running transaction so that we know where to look for
   * pointers when the memory block gets compacted. Once committed, this is
   * uint64_t(-1).
   */
  uint64_t txn_id;
  /**
   * Can be zero.
   */
  uint32_t size_data;
  /**
   * String length + 1 (including the trailing null byte), or 0 if the
   * description is NULL.
   */
  uint32_t size_desc;
  /**
   * During a transaction, neg_nbytes_written is used to track the number of
   * already written bytes. On commit, refcnt is set to 1.
   */
  union {
    int32_t refcnt;
    int32_t neg_nbytes_written;
  };
  cvmcache_object_type type;
  struct cvmcache_hash id;
};

/**
 * Listings are generated and cached during the entire lifetime of a listing
 * id. Not very memory efficient, but we don't optimize for listings.
 */
struct Listing {
  Listing() : pos(0) { }
  uint64_t pos;
  vector<struct cvmcache_object_info> elems;
};

/**
 * Allows us to use a cvmcache_hash in (hash) maps.
 */
struct ComparableHash {
  ComparableHash() { memset(&hash, 0, sizeof(hash)); }
  explicit ComparableHash(const struct cvmcache_hash &h) : hash(h) { }
  bool operator==(const ComparableHash &other) const {
    return cvmcache_hash_cmp(const_cast<cvmcache_hash *>(&(this->hash)),
                             const_cast<cvmcache_hash *>(&(other.hash))) == 0;
  }
  bool operator!=(const ComparableHash &other) const {
    return cvmcache_hash_cmp(const_cast<cvmcache_hash *>(&(this->hash)),
                             const_cast<cvmcache_hash *>(&(other.hash))) != 0;
  }
  bool operator<(const ComparableHash &other) const {
    return cvmcache_hash_cmp(const_cast<cvmcache_hash *>(&(this->hash)),
                             const_cast<cvmcache_hash *>(&(other.hash))) < 0;
  }
  bool operator>(const ComparableHash &other) const {
    return cvmcache_hash_cmp(const_cast<cvmcache_hash *>(&(this->hash)),
                             const_cast<cvmcache_hash *>(&(other.hash))) > 0;
  }

  struct cvmcache_hash hash;
};

namespace {

static inline uint32_t hasher_uint64(const uint64_t &key) {
  return MurmurHash2(&key, sizeof(key), 0x07387a4f);
}

static inline uint32_t hasher_any(const ComparableHash &key) {
  return (uint32_t)*(reinterpret_cast<const uint32_t *>(&key.hash));
}

}  // anonymous namespace

/**
 * Used in the PluginRamCache when detaching nested catalogs.
 */
struct cvmcache_context *ctx;

/**
 * Implements all the cache plugin callbacks. Singleton.
 */
class PluginRamCache : public Callbackable<MallocHeap::BlockPtr> {
 public:
  static PluginRamCache *Create(const string &mem_size_str) {
    assert(instance_ == NULL);

    uint64_t mem_size_bytes;
    if (HasSuffix(mem_size_str, "%", false)) {
      mem_size_bytes = platform_memsize() * String2Uint64(mem_size_str) / 100;
    } else {
      mem_size_bytes = String2Uint64(mem_size_str) * 1024 * 1024;
    }
    instance_ = new PluginRamCache(mem_size_bytes);
    return instance_;
  }

  static PluginRamCache *GetInstance() {
    assert(instance_ != NULL);
    return instance_;
  }

  ~PluginRamCache() {
    delete storage_;
    delete objects_all_;
    delete objects_volatile_;
    instance_ = NULL;
  }

  void DropBreadcrumbs() { breadcrumbs_.clear(); }

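  /**
   * Callback for cvmcache_chrefcnt: adjusts an object's reference counter by
   * change_by.  Transitions from and to refcnt == 0 update the pinned byte
   * accounting; a change that would make the counter negative is rejected
   * with CVMCACHE_STATUS_BADCOUNT.  Accesses also touch volatile objects in
   * their LRU list.
   */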
  static int ram_chrefcnt(struct cvmcache_hash *id, int32_t change_by) {
    const ComparableHash h(*id);
    ObjectHeader *object;
    if (!Me()->objects_all_->Lookup(h, &object))
      return CVMCACHE_STATUS_NOENTRY;

    if (object->type == CVMCACHE_OBJECT_VOLATILE)
      Me()->objects_volatile_->Update(h);

    if (change_by == 0)
      return CVMCACHE_STATUS_OK;
    if ((object->refcnt + change_by) < 0)
      return CVMCACHE_STATUS_BADCOUNT;

    if (object->refcnt == 0) {
      Me()->cache_info_.pinned_bytes += Me()->storage_->GetSize(object);
      Me()->CheckHighPinWatermark();
    }
    object->refcnt += change_by;
    if (object->refcnt == 0) {
      Me()->cache_info_.pinned_bytes -= Me()->storage_->GetSize(object);
      Me()->in_danger_zone_ = Me()->IsInDangerZone();
    }
    return CVMCACHE_STATUS_OK;
  }

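  /**
   * Callback for cvmcache_obj_info: reports size, type, pin status, and a
   * strdup'd copy of the description of a cached object.
   */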
  static int ram_obj_info(struct cvmcache_hash *id,
                          struct cvmcache_object_info *info) {
    const ComparableHash h(*id);
    ObjectHeader *object;
    if (!Me()->objects_all_->Lookup(h, &object, false))
      return CVMCACHE_STATUS_NOENTRY;

    info->size = object->size_data;
    info->type = object->type;
    info->pinned = object->refcnt > 0;
    info->description = (object->GetDescription() == NULL)
                            ? NULL
                            : strdup(object->GetDescription());
    return CVMCACHE_STATUS_OK;
  }

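  /**
   * Callback for cvmcache_pread: copies up to *size bytes starting at offset
   * from the in-memory object into buffer and returns the number of bytes
   * actually copied in *size.
   */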
  static int ram_pread(struct cvmcache_hash *id,
                       uint64_t offset,
                       uint32_t *size,
                       unsigned char *buffer) {
    const ComparableHash h(*id);
    ObjectHeader *object;
    const bool retval = Me()->objects_all_->Lookup(h, &object, false);
    assert(retval);
    if (offset > object->size_data)
      return CVMCACHE_STATUS_OUTOFBOUNDS;
    const unsigned nbytes =
        std::min(*size, static_cast<uint32_t>(object->size_data - offset));
    memcpy(buffer, object->GetData() + offset, nbytes);
    *size = nbytes;
    return CVMCACHE_STATUS_OK;
  }

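  /**
   * Callback for cvmcache_start_txn: reserves space on the heap for a new
   * object.  If the final size is unknown, a 4 kB placeholder is allocated
   * and later grown by ram_write_txn.
   */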
  static int ram_start_txn(struct cvmcache_hash *id,
                           uint64_t txn_id,
                           struct cvmcache_object_info *info) {
    ObjectHeader object_header;
    object_header.txn_id = txn_id;
    if (info->size != CVMCACHE_SIZE_UNKNOWN)
      object_header.size_data = info->size;
    else
      object_header.size_data = 4096;
    if (info->description != NULL)
      object_header.size_desc = strlen(info->description) + 1;
    object_header.refcnt = 1;
    object_header.type = info->type;
    object_header.id = *id;

    const uint32_t total_size = sizeof(object_header) +
                                object_header.size_desc +
                                object_header.size_data;
    Me()->TryFreeSpace(total_size);
    ObjectHeader *allocd_object = reinterpret_cast<ObjectHeader *>(
        Me()->storage_->Allocate(total_size, &object_header,
                                 sizeof(object_header)));
    if (allocd_object == NULL)
      return CVMCACHE_STATUS_NOSPACE;

    allocd_object->SetDescription(info->description);
    Me()->transactions_.Insert(txn_id, allocd_object);
    return CVMCACHE_STATUS_OK;
  }

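  /**
   * Callback for cvmcache_write_txn: appends a chunk of data to an open
   * transaction.  If the reserved space is too small, the object is expanded
   * on the heap, which may trigger compaction and thus relocate the object.
   * The write position is tracked in neg_nbytes_written.
   */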
  static int ram_write_txn(uint64_t txn_id,
                           unsigned char *buffer,
                           uint32_t size) {
    ObjectHeader *txn_object;
    int retval = Me()->transactions_.Lookup(txn_id, &txn_object);
    assert(retval);
    assert(size > 0);

    if (txn_object->neg_nbytes_written > 0)
      txn_object->neg_nbytes_written = 0;
    if ((size - txn_object->neg_nbytes_written) > txn_object->size_data) {
      const uint32_t current_size = Me()->storage_->GetSize(txn_object);
      const uint32_t header_size = current_size - txn_object->size_data;
      const uint32_t new_size =
          std::max(header_size + size - txn_object->neg_nbytes_written,
                   uint32_t(current_size * kObjectExpandFactor));
      const bool did_compact = Me()->TryFreeSpace(new_size);
      if (did_compact) {
        retval = Me()->transactions_.Lookup(txn_id, &txn_object);
        assert(retval);
      }
      txn_object = reinterpret_cast<ObjectHeader *>(
          Me()->storage_->Expand(txn_object, new_size));
      if (txn_object == NULL)
        return CVMCACHE_STATUS_NOSPACE;
      txn_object->size_data = new_size - header_size;
      Me()->transactions_.Insert(txn_id, txn_object);
    }

    memcpy(txn_object->GetData() - txn_object->neg_nbytes_written, buffer,
           size);
    txn_object->neg_nbytes_written -= size;
    return CVMCACHE_STATUS_OK;
  }

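  /**
   * Callback for cvmcache_commit_txn: turns a finished transaction into a
   * regular cache entry.  If the same object was committed concurrently, the
   * copy at hand is dropped and the existing entry's reference counter is
   * increased instead.
   */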
  static int ram_commit_txn(uint64_t txn_id) {
    Me()->TryFreeSpace(0);
    if (Me()->objects_all_->IsFull())
      return CVMCACHE_STATUS_NOSPACE;

    ObjectHeader *txn_object;
    const int retval = Me()->transactions_.Lookup(txn_id, &txn_object);
    assert(retval);

    Me()->transactions_.Erase(txn_id);
    const ComparableHash h(txn_object->id);
    ObjectHeader *existing_object;
    if (Me()->objects_all_->Lookup(h, &existing_object)) {
      // Concurrent addition of the same object: drop the copy at hand and
      // increase the reference count of the existing one
      Me()->storage_->MarkFree(txn_object);
      if (existing_object->refcnt == 0)
        Me()->cache_info_.pinned_bytes += Me()->storage_->GetSize(
            existing_object);
      existing_object->refcnt++;
    } else {
      txn_object->txn_id = uint64_t(-1);
      if (txn_object->neg_nbytes_written > 0)
        txn_object->neg_nbytes_written = 0;
      txn_object->size_data = -(txn_object->neg_nbytes_written);
      txn_object->refcnt = 1;
      Me()->cache_info_.used_bytes += Me()->storage_->GetSize(txn_object);
      Me()->cache_info_.pinned_bytes += Me()->storage_->GetSize(txn_object);
      Me()->objects_all_->Insert(h, txn_object);
      if (txn_object->type == CVMCACHE_OBJECT_VOLATILE) {
        assert(!Me()->objects_volatile_->IsFull());
        Me()->objects_volatile_->Insert(h, txn_object);
      }
    }
    Me()->CheckHighPinWatermark();
    return CVMCACHE_STATUS_OK;
  }

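  /**
   * Callback for cvmcache_abort_txn: discards an open transaction and
   * releases its heap space.
   */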
  static int ram_abort_txn(uint64_t txn_id) {
    ObjectHeader *txn_object = NULL;
    const int retval = Me()->transactions_.Lookup(txn_id, &txn_object);
    assert(retval);
    Me()->transactions_.Erase(txn_id);
    Me()->storage_->MarkFree(txn_object);
    return CVMCACHE_STATUS_OK;
  }


  static int ram_info(struct cvmcache_info *info) {
    *info = Me()->cache_info_;
    return CVMCACHE_STATUS_OK;
  }

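  /**
   * Callback for cvmcache_shrink: evicts unpinned objects until at most
   * shrink_to bytes are used.  Returns CVMCACHE_STATUS_PARTIAL if pinned
   * objects prevent reaching the limit.
   */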
  static int ram_shrink(uint64_t shrink_to, uint64_t *used) {
    *used = Me()->cache_info_.used_bytes;
    if (*used <= shrink_to)
      return CVMCACHE_STATUS_OK;

    Me()->DoShrink(shrink_to);
    *used = Me()->cache_info_.used_bytes;
    return (*used <= shrink_to) ? CVMCACHE_STATUS_OK : CVMCACHE_STATUS_PARTIAL;
  }

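  /**
   * Callbacks for cvmcache_listing_begin/next/end: a listing takes a snapshot
   * of all objects of the requested type and is then iterated element by
   * element.
   */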
  static int ram_listing_begin(uint64_t lst_id,
                               enum cvmcache_object_type type) {
    Listing *lst = new Listing();
    Me()->objects_all_->FilterBegin();
    while (Me()->objects_all_->FilterNext()) {
      ComparableHash h;
      ObjectHeader *object;
      Me()->objects_all_->FilterGet(&h, &object);
      if (object->type != type)
        continue;

      struct cvmcache_object_info item;
      item.id = object->id;
      item.size = object->size_data;
      item.type = type;
      item.pinned = object->refcnt != 0;
      item.description = (object->size_desc > 0)
                             ? strdup(object->GetDescription())
                             : NULL;
      lst->elems.push_back(item);
    }
    Me()->objects_all_->FilterEnd();

    Me()->listings_.Insert(lst_id, lst);
    return CVMCACHE_STATUS_OK;
  }


  static int ram_listing_next(int64_t listing_id,
                              struct cvmcache_object_info *item) {
    Listing *lst;
    const bool retval = Me()->listings_.Lookup(listing_id, &lst);
    assert(retval);
    if (lst->pos >= lst->elems.size())
      return CVMCACHE_STATUS_OUTOFBOUNDS;
    *item = lst->elems[lst->pos];
    lst->pos++;
    return CVMCACHE_STATUS_OK;
  }


  static int ram_listing_end(int64_t listing_id) {
    Listing *lst;
    const bool retval = Me()->listings_.Lookup(listing_id, &lst);
    assert(retval);

    // Don't free description strings, done by the library
    delete lst;
    Me()->listings_.Erase(listing_id);
    return CVMCACHE_STATUS_OK;
  }

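  /**
   * Callbacks for cvmcache_breadcrumb_store/load: breadcrumbs are kept in a
   * simple map indexed by the repository name (fqrn).
   */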
  static int ram_breadcrumb_store(const char *fqrn,
                                  const cvmcache_breadcrumb *breadcrumb) {
    Me()->breadcrumbs_[fqrn] = *breadcrumb;
    return CVMCACHE_STATUS_OK;
  }


  static int ram_breadcrumb_load(const char *fqrn,
                                 cvmcache_breadcrumb *breadcrumb) {
    const map<std::string, cvmcache_breadcrumb>::const_iterator itr =
        Me()->breadcrumbs_.find(fqrn);
    if (itr == Me()->breadcrumbs_.end())
      return CVMCACHE_STATUS_NOENTRY;
    *breadcrumb = itr->second;
    return CVMCACHE_STATUS_OK;
  }

 private:
  static const uint64_t kMinSize;  // = 100 * 1024 * 1024
  static const double kShrinkFactor;  // = 0.75
  static const double kObjectExpandFactor;  // = 1.5
  static const double kSlotFraction;  // = 0.04
  static const double kDangerZoneThreshold;  // = 0.7

  static PluginRamCache *instance_;
  static PluginRamCache *Me() { return instance_; }
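  /**
   * Splits the configured memory budget between the data heap and the two
   * LRU index tables (kSlotFraction of the total).  The number of LRU slots
   * is rounded down to a multiple of 64.
   */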
  explicit PluginRamCache(uint64_t mem_size) {
    in_danger_zone_ = false;

    const uint64_t heap_size = RoundUp8(
        std::max(kMinSize, uint64_t(mem_size * (1.0 - kSlotFraction))));
    memset(&cache_info_, 0, sizeof(cache_info_));
    cache_info_.size_bytes = heap_size;
    storage_ = new MallocHeap(
        heap_size, this->MakeCallback(&PluginRamCache::OnBlockMove, this));

    struct cvmcache_hash hash_empty;
    memset(&hash_empty, 0, sizeof(hash_empty));

    transactions_.Init(64, uint64_t(-1), hasher_uint64);
    listings_.Init(8, uint64_t(-1), hasher_uint64);

    const double slot_size =
        lru::LruCache<ComparableHash, ObjectHeader *>::GetEntrySize();
    const uint64_t num_slots =
        uint64_t((heap_size * kSlotFraction) / (2.0 * slot_size));
    const unsigned mask_64 = ~((1 << 6) - 1);

    LogCvmfs(kLogCache, kLogDebug | kLogSyslog,
             "Allocating %" PRIu64 "MB of memory for up to %" PRIu64 " objects",
             heap_size / (1024 * 1024), num_slots & mask_64);

    // Number of cache entries must be a multiple of 64
    objects_all_ = new lru::LruCache<ComparableHash, ObjectHeader *>(
        num_slots & mask_64,
        ComparableHash(hash_empty),
        hasher_any,
        perf::StatisticsTemplate("objects_all", &statistics_));
    objects_volatile_ = new lru::LruCache<ComparableHash, ObjectHeader *>(
        num_slots & mask_64,
        ComparableHash(hash_empty),
        hasher_any,
        perf::StatisticsTemplate("objects_volatile", &statistics_));
  }

  /**
   * Returns true if memory compaction took place and pointers might have been
   * invalidated.
   */
  bool TryFreeSpace(uint64_t bytes_required) {
    if (!objects_all_->IsFull() && storage_->HasSpaceFor(bytes_required))
      return false;

    // Free space occupied due to piecewise catalog storage
    if (!objects_all_->IsFull()) {
      LogCvmfs(kLogCache, kLogDebug, "compacting ram cache");
      storage_->Compact();
      if (storage_->HasSpaceFor(bytes_required))
        return true;
    }

    const uint64_t shrink_to =
        std::min(storage_->capacity() - (bytes_required + 8),
                 uint64_t(storage_->capacity() * kShrinkFactor));
    DoShrink(shrink_to);
    return true;
  }

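  /**
   * Callback invoked by the MallocHeap whenever compaction moves a block:
   * re-points the entry in the LRU tables or, for open transactions, in the
   * transaction table to the block's new address.
   */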
  void OnBlockMove(const MallocHeap::BlockPtr &ptr) {
    assert(ptr.pointer);
    ObjectHeader *object = reinterpret_cast<ObjectHeader *>(ptr.pointer);
    const ComparableHash h(object->id);
    if (object->txn_id == uint64_t(-1)) {
      bool retval = objects_all_->UpdateValue(h, object);
      assert(retval);
      if (object->type == CVMCACHE_OBJECT_VOLATILE) {
        retval = objects_volatile_->UpdateValue(h, object);
        assert(retval);
      }
    } else {
      const uint64_t old_size = transactions_.size();
      transactions_.Insert(object->txn_id, object);
      assert(old_size == transactions_.size());
    }
  }

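  /**
   * Evicts unpinned objects, volatile ones first, until the compacted heap
   * uses at most shrink_to bytes, then compacts the heap.
   */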
  void DoShrink(uint64_t shrink_to) {
    ComparableHash h;
    ObjectHeader *object;

    LogCvmfs(kLogCache, kLogDebug | kLogSyslog,
             "clean up cache until at most %lu KB is used", shrink_to / 1024);

    objects_volatile_->FilterBegin();
    while (objects_volatile_->FilterNext()) {
      objects_volatile_->FilterGet(&h, &object);
      if (object->refcnt != 0)
        continue;
      cache_info_.used_bytes -= storage_->GetSize(object);
      storage_->MarkFree(object);
      objects_volatile_->FilterDelete();
      objects_all_->Forget(h);
      if (storage_->compacted_bytes() <= shrink_to)
        break;
    }
    objects_volatile_->FilterEnd();

    objects_all_->FilterBegin();
    while ((storage_->compacted_bytes() > shrink_to)
           && objects_all_->FilterNext()) {
      objects_all_->FilterGet(&h, &object);
      if (object->refcnt != 0)
        continue;
      assert(object->type != CVMCACHE_OBJECT_VOLATILE);
      cache_info_.used_bytes -= storage_->GetSize(object);
      storage_->MarkFree(object);
      objects_all_->FilterDelete();
    }
    objects_all_->FilterEnd();

    storage_->Compact();
    cache_info_.no_shrink++;
  }

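  /**
   * Once the pinned bytes exceed kDangerZoneThreshold of the cache size,
   * clients are asked to detach nested catalogs in order to release pinned
   * objects.
   */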
  void CheckHighPinWatermark() {
    if (!Me()->in_danger_zone_ && Me()->IsInDangerZone()) {
      LogCvmfs(kLogCvmfs, kLogDebug | kLogSyslog,
               "high watermark of pinned files");
      Me()->in_danger_zone_ = true;
      cvmcache_ask_detach(ctx);
    }
  }

  bool IsInDangerZone() {
    return (static_cast<double>(cache_info_.pinned_bytes)
            / static_cast<double>(cache_info_.size_bytes))
           > kDangerZoneThreshold;
  }

  struct cvmcache_info cache_info_;
  perf::Statistics statistics_;
  SmallHashDynamic<uint64_t, ObjectHeader *> transactions_;
  SmallHashDynamic<uint64_t, Listing *> listings_;
  lru::LruCache<ComparableHash, ObjectHeader *> *objects_all_;
  lru::LruCache<ComparableHash, ObjectHeader *> *objects_volatile_;
  map<std::string, cvmcache_breadcrumb> breadcrumbs_;
  MallocHeap *storage_;
  bool in_danger_zone_;
};  // class PluginRamCache

PluginRamCache *PluginRamCache::instance_ = NULL;
const uint64_t PluginRamCache::kMinSize = 100 * 1024 * 1024;
const double PluginRamCache::kShrinkFactor = 0.75;
const double PluginRamCache::kObjectExpandFactor = 1.5;
const double PluginRamCache::kSlotFraction = 0.04;
const double PluginRamCache::kDangerZoneThreshold = 0.7;

static void Usage(const char *progname) {
  LogCvmfs(kLogCache, kLogStdout, "%s <config file>", progname);
}


/**
 * For testing and debugging purposes, the cache manager drops its
 * breadcrumb cache upon receiving SIGUSR2.
 */
void DropBreadcrumbs(int sig, siginfo_t *siginfo, void *context) {
  LogCvmfs(kLogCache, kLogSyslog | kLogDebug, "dropping breadcrumbs");
  PluginRamCache::GetInstance()->DropBreadcrumbs();
}

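/**
 * Reads the plugin configuration, installs the SIGUSR2 handler, registers the
 * callbacks with libcvmfs_cache, and serves client requests until terminated.
 */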
int main(int argc, char **argv) {
  if (argc < 2) {
    Usage(argv[0]);
    return 1;
  }

  SetLogDebugFile("/dev/null");

  cvmcache_init_global();

  cvmcache_option_map *options = cvmcache_options_init();
  if (cvmcache_options_parse(options, argv[1]) != 0) {
    LogCvmfs(kLogCache, kLogStderr, "cannot parse options file %s", argv[1]);
    return 1;
  }
  char *debug_log = cvmcache_options_get(options,
                                         "CVMFS_CACHE_PLUGIN_DEBUGLOG");
  if (debug_log != NULL) {
    SetLogDebugFile(debug_log);
    cvmcache_options_free(debug_log);
  }
  char *locator = cvmcache_options_get(options, "CVMFS_CACHE_PLUGIN_LOCATOR");
  if (locator == NULL) {
    LogCvmfs(kLogCache, kLogStderr, "CVMFS_CACHE_PLUGIN_LOCATOR missing");
    cvmcache_options_fini(options);
    return 1;
  }
  char *mem_size = cvmcache_options_get(options, "CVMFS_CACHE_PLUGIN_SIZE");
  if (mem_size == NULL) {
    LogCvmfs(kLogCache, kLogStderr, "CVMFS_CACHE_PLUGIN_SIZE missing");
    cvmcache_options_fini(options);
    return 1;
  }
  char *test_mode = cvmcache_options_get(options, "CVMFS_CACHE_PLUGIN_TEST");

  if (!test_mode)
    cvmcache_spawn_watchdog(NULL);

  PluginRamCache *plugin = PluginRamCache::Create(mem_size);
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = DropBreadcrumbs;
  sa.sa_flags = SA_SIGINFO;
  sigfillset(&sa.sa_mask);
  int retval = sigaction(SIGUSR2, &sa, NULL);
  assert(retval == 0);

  struct cvmcache_callbacks callbacks;
  memset(&callbacks, 0, sizeof(callbacks));
  callbacks.cvmcache_chrefcnt = plugin->ram_chrefcnt;
  callbacks.cvmcache_obj_info = plugin->ram_obj_info;
  callbacks.cvmcache_pread = plugin->ram_pread;
  callbacks.cvmcache_start_txn = plugin->ram_start_txn;
  callbacks.cvmcache_write_txn = plugin->ram_write_txn;
  callbacks.cvmcache_commit_txn = plugin->ram_commit_txn;
  callbacks.cvmcache_abort_txn = plugin->ram_abort_txn;
  callbacks.cvmcache_info = plugin->ram_info;
  callbacks.cvmcache_shrink = plugin->ram_shrink;
  callbacks.cvmcache_listing_begin = plugin->ram_listing_begin;
  callbacks.cvmcache_listing_next = plugin->ram_listing_next;
  callbacks.cvmcache_listing_end = plugin->ram_listing_end;
  callbacks.cvmcache_breadcrumb_store = plugin->ram_breadcrumb_store;
  callbacks.cvmcache_breadcrumb_load = plugin->ram_breadcrumb_load;
  callbacks.capabilities = CVMCACHE_CAP_ALL_V2;

  ctx = cvmcache_init(&callbacks);
  retval = cvmcache_listen(ctx, locator);
  if (!retval) {
    LogCvmfs(kLogCache, kLogStderr, "failed to listen on %s", locator);
    return 1;
  }

  if (test_mode) {
    // Daemonize, print out PID
    pid_t pid;
    int statloc;
    if ((pid = fork()) == 0) {
      if ((pid = fork()) == 0) {
        const int null_read = open("/dev/null", O_RDONLY);
        const int null_write = open("/dev/null", O_WRONLY);
        assert((null_read >= 0) && (null_write >= 0));
        int retval = dup2(null_read, 0);
        assert(retval == 0);
        retval = dup2(null_write, 1);
        assert(retval == 1);
        retval = dup2(null_write, 2);
        assert(retval == 2);
        close(null_read);
        close(null_write);
      } else {
        assert(pid > 0);
        printf("%d\n", pid);
        fflush(stdout);
        fsync(1);
        _exit(0);
      }
    } else {
      assert(pid > 0);
      waitpid(pid, &statloc, 0);
      _exit(0);
    }
  }

  LogCvmfs(kLogCache, kLogStdout,
           "Listening for cvmfs clients on %s\n"
           "NOTE: this process needs to run as user cvmfs\n",
           locator);

  cvmcache_process_requests(ctx, 0);
  if (test_mode)
    while (true)
      sleep(1);
  if (!cvmcache_is_supervised()) {
    LogCvmfs(kLogCache, kLogStdout, "Press <Ctrl+D> to quit");
    LogCvmfs(kLogCache, kLogStdout,
             "Press <R Enter> to ask clients to release nested catalogs");
    while (true) {
      char buf;
      retval = read(fileno(stdin), &buf, 1);
      if (retval != 1)
        break;
      if (buf == 'R') {
        LogCvmfs(kLogCache, kLogStdout,
                 " ... asking clients to release nested catalogs");
        cvmcache_ask_detach(ctx);
      }
    }
    cvmcache_terminate(ctx);
  } else {
    LogCvmfs(kLogCache, kLogDebug | kLogSyslog,
             "CernVM-FS RAM cache plugin started in supervised mode");
  }

  cvmcache_wait_for(ctx);
  LogCvmfs(kLogCache, kLogDebug | kLogStdout, " ... good bye");
  cvmcache_options_free(mem_size);
  cvmcache_options_free(locator);
  cvmcache_options_fini(options);
  cvmcache_terminate_watchdog();
  cvmcache_cleanup_global();
  return 0;
}