GCC Code Coverage Report

Directory: cvmfs/
File:      cvmfs/cache_ram.cc
Date:      2019-02-03 02:48:13

            Exec    Total    Coverage
Lines:       165      197      83.8 %
Branches:     51       80      63.8 %

/**
 * This file is part of the CernVM File System.
 */
#include "cvmfs_config.h"
#include "cache_ram.h"

#include <errno.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <new>

#include "kvstore.h"
#include "logging.h"
#include "util/posix.h"
#include "util/string.h"
#include "util_concurrency.h"

using namespace std;  // NOLINT

const shash::Any RamCacheManager::kInvalidHandle;

string RamCacheManager::Describe() {
  return "Internal in-memory cache manager (size " +
         StringifyInt(max_size_ / (1024 * 1024)) + "MB)\n";
}


RamCacheManager::RamCacheManager(
  uint64_t max_size,
  unsigned max_entries,
  MemoryKvStore::MemoryAllocator alloc,
  perf::StatisticsTemplate statistics)
  : max_size_(max_size)
  , fd_table_(max_entries, ReadOnlyHandle())
  // TODO(jblomer): the number of slots in the kv-stores should _not_ be the
  // number of open files.
  , regular_entries_(max_entries,
                     alloc,
                     max_size,
                     perf::StatisticsTemplate("kv.regular", statistics))
  , volatile_entries_(max_entries,
                      alloc,
                      max_size,
                      perf::StatisticsTemplate("kv.volatile", statistics))
  , counters_(statistics)
{
  int retval = pthread_rwlock_init(&rwlock_, NULL);
  assert(retval == 0);
  LogCvmfs(kLogCache, kLogDebug, "max %u B, %u entries",
           max_size, max_entries);
}


RamCacheManager::~RamCacheManager() {
  pthread_rwlock_destroy(&rwlock_);
}


int RamCacheManager::AddFd(const ReadOnlyHandle &handle) {
  int result = fd_table_.OpenFd(handle);
  if (result == -ENFILE) {
    LogCvmfs(kLogCache, kLogDebug, "too many open files");
    perf::Inc(counters_.n_enfile);
  }
  return result;
}


bool RamCacheManager::AcquireQuotaManager(QuotaManager *quota_mgr) {
  assert(quota_mgr != NULL);
  quota_mgr_ = quota_mgr;
  LogCvmfs(kLogCache, kLogDebug, "set quota manager");
  return true;
}


int RamCacheManager::Open(const BlessedObject &object) {
  WriteLockGuard guard(rwlock_);
  return DoOpen(object.id);
}


int RamCacheManager::DoOpen(const shash::Any &id) {
  bool ok;
  bool is_volatile;
  MemoryBuffer buf;

  if (regular_entries_.Contains(id)) {
    is_volatile = false;
  } else if (volatile_entries_.Contains(id)) {
    is_volatile = true;
  } else {
    LogCvmfs(kLogCache, kLogDebug, "miss for %s",
             id.ToString().c_str());
    perf::Inc(counters_.n_openmiss);
    return -ENOENT;
  }
  ReadOnlyHandle generic_handle(id, is_volatile);
  int fd = AddFd(generic_handle);
  if (fd < 0) {
    LogCvmfs(kLogCache, kLogDebug, "error while opening %s: %s",
             id.ToString().c_str(), strerror(-fd));
    return fd;
  }
  if (is_volatile) {
    LogCvmfs(kLogCache, kLogDebug, "hit in volatile entries for %s",
             id.ToString().c_str());
    perf::Inc(counters_.n_openvolatile);
  } else {
    LogCvmfs(kLogCache, kLogDebug, "hit in regular entries for %s",
             id.ToString().c_str());
    perf::Inc(counters_.n_openregular);
  }
  ok = GetStore(generic_handle)->IncRef(id);
  assert(ok);
  return fd;
}


int64_t RamCacheManager::GetSize(int fd) {
  ReadLockGuard guard(rwlock_);
  ReadOnlyHandle generic_handle = fd_table_.GetHandle(fd);
  if (generic_handle.handle == kInvalidHandle) {
    LogCvmfs(kLogCache, kLogDebug, "bad fd %d on GetSize", fd);
    return -EBADF;
  }
  perf::Inc(counters_.n_getsize);
  return GetStore(generic_handle)->GetSize(generic_handle.handle);
}


int RamCacheManager::Close(int fd) {
  bool rc;

  WriteLockGuard guard(rwlock_);
  ReadOnlyHandle generic_handle = fd_table_.GetHandle(fd);
  if (generic_handle.handle == kInvalidHandle) {
    LogCvmfs(kLogCache, kLogDebug, "bad fd %d on Close", fd);
    return -EBADF;
  }
  rc = GetStore(generic_handle)->Unref(generic_handle.handle);
  assert(rc);

  int rc_int = fd_table_.CloseFd(fd);
  assert(rc_int == 0);
  LogCvmfs(kLogCache, kLogDebug, "closed fd %d", fd);
  perf::Inc(counters_.n_close);
  return 0;
}


int64_t RamCacheManager::Pread(
  int fd,
  void *buf,
  uint64_t size,
  uint64_t offset)
{
  ReadLockGuard guard(rwlock_);
  ReadOnlyHandle generic_handle = fd_table_.GetHandle(fd);
  if (generic_handle.handle == kInvalidHandle) {
    LogCvmfs(kLogCache, kLogDebug, "bad fd %d on Pread", fd);
    return -EBADF;
  }
  perf::Inc(counters_.n_pread);
  return GetStore(generic_handle)->Read(
    generic_handle.handle, buf, size, offset);
}


int RamCacheManager::Dup(int fd) {
  bool ok;
  int rc;
  WriteLockGuard guard(rwlock_);
  ReadOnlyHandle generic_handle = fd_table_.GetHandle(fd);
  if (generic_handle.handle == kInvalidHandle) {
    LogCvmfs(kLogCache, kLogDebug, "bad fd %d on Dup", fd);
    return -EBADF;
  }
  rc = AddFd(generic_handle);
  if (rc < 0) return rc;
  ok = GetStore(generic_handle)->IncRef(generic_handle.handle);
  assert(ok);
  LogCvmfs(kLogCache, kLogDebug, "dup fd %d", fd);
  perf::Inc(counters_.n_dup);
  return rc;
}


/**
 * For a RAM cache, read-ahead is a no-op.
 */
int RamCacheManager::Readahead(int fd) {
  ReadLockGuard guard(rwlock_);
  ReadOnlyHandle generic_handle = fd_table_.GetHandle(fd);
  if (generic_handle.handle == kInvalidHandle) {
    LogCvmfs(kLogCache, kLogDebug, "bad fd %d on Readahead", fd);
    return -EBADF;
  }
  LogCvmfs(kLogCache, kLogDebug, "readahead (no-op) on %d", fd);
  perf::Inc(counters_.n_readahead);
  return 0;
}


int RamCacheManager::StartTxn(const shash::Any &id, uint64_t size, void *txn) {
  LogCvmfs(kLogCache, kLogDebug, "new transaction with id %s",
           id.ToString().c_str());
  Transaction *transaction = new (txn) Transaction();
  transaction->buffer.id = id;
  transaction->pos = 0;
  transaction->expected_size = size;
  transaction->buffer.size = (size == kSizeUnknown) ? kPageSize : size;
  transaction->buffer.address = malloc(transaction->buffer.size);
  if (!transaction->buffer.address && size > 0) {
    LogCvmfs(kLogCache, kLogDebug,
             "failed to allocate %lu B for %s",
             size, id.ToString().c_str());
    return -errno;
  }
  perf::Inc(counters_.n_starttxn);
  return 0;
}


void RamCacheManager::CtrlTxn(
  const ObjectInfo &object_info,
  const int flags,
  void *txn)
{
  Transaction *transaction = reinterpret_cast<Transaction *>(txn);
  transaction->description = object_info.description;
  transaction->buffer.object_type = object_info.type;
  LogCvmfs(kLogCache, kLogDebug, "modified transaction %s",
           transaction->buffer.id.ToString().c_str());
}


int64_t RamCacheManager::Write(const void *buf, uint64_t size, void *txn) {
  Transaction *transaction = reinterpret_cast<Transaction *>(txn);

  assert(transaction->pos <= transaction->buffer.size);
  if (transaction->pos + size > transaction->buffer.size) {
    if (transaction->expected_size == kSizeUnknown) {
      perf::Inc(counters_.n_realloc);
      size_t new_size = max(2*transaction->buffer.size,
        (size_t) (size + transaction->pos));
      LogCvmfs(kLogCache, kLogDebug, "reallocate transaction for %s to %u B",
               transaction->buffer.id.ToString().c_str(),
               transaction->buffer.size);
      void *new_ptr = realloc(transaction->buffer.address, new_size);
      if (!new_ptr) {
        LogCvmfs(kLogCache, kLogDebug,
                 "failed to allocate %lu B for %s",
                 new_size, transaction->buffer.id.ToString().c_str());
        return -EIO;
      }
      transaction->buffer.address = new_ptr;
      transaction->buffer.size = new_size;
    } else {
      LogCvmfs(kLogCache, kLogDebug,
               "attempted to write more than requested (%u>%u)",
               size, transaction->buffer.size);
      return -EFBIG;
    }
  }

  if (transaction->buffer.address && buf) {
    // LogCvmfs(kLogCache, kLogDebug, "copy %u bytes of transaction %s",
    //          size, transaction->id.ToString().c_str());
    memcpy(static_cast<char *>(transaction->buffer.address) + transaction->pos,
           buf, size);
  }
  transaction->pos += size;
  perf::Inc(counters_.n_write);
  return size;
}


int RamCacheManager::Reset(void *txn) {
  Transaction *transaction = reinterpret_cast<Transaction *>(txn);
  transaction->pos = 0;
  LogCvmfs(kLogCache, kLogDebug, "reset transaction %s",
           transaction->buffer.id.ToString().c_str());
  perf::Inc(counters_.n_reset);
  return 0;
}


int RamCacheManager::OpenFromTxn(void *txn) {
  WriteLockGuard guard(rwlock_);
  Transaction *transaction = reinterpret_cast<Transaction *>(txn);
  int64_t retval = CommitToKvStore(transaction);
  if (retval < 0) {
    LogCvmfs(kLogCache, kLogDebug,
             "error while committing transaction on %s: %s",
             transaction->buffer.id.ToString().c_str(), strerror(-retval));
    return retval;
  }
  LogCvmfs(kLogCache, kLogDebug, "open pending transaction for %s",
           transaction->buffer.id.ToString().c_str());
  perf::Inc(counters_.n_committxn);
  return DoOpen(transaction->buffer.id);
}


int RamCacheManager::AbortTxn(void *txn) {
  Transaction *transaction = reinterpret_cast<Transaction *>(txn);
  free(transaction->buffer.address);
  LogCvmfs(kLogCache, kLogDebug, "abort transaction %s",
           transaction->buffer.id.ToString().c_str());
  perf::Inc(counters_.n_aborttxn);
  return 0;
}


int RamCacheManager::CommitTxn(void *txn) {
  WriteLockGuard guard(rwlock_);
  Transaction *transaction = reinterpret_cast<Transaction *>(txn);
  perf::Inc(counters_.n_committxn);
  int64_t rc = CommitToKvStore(transaction);
  if (rc < 0) return rc;
  free(transaction->buffer.address);
  return rc;
}


int64_t RamCacheManager::CommitToKvStore(Transaction *transaction) {
  MemoryKvStore *store;

  if (transaction->buffer.object_type == kTypeVolatile) {
    store = &volatile_entries_;
  } else {
    store = &regular_entries_;
  }
  if (transaction->buffer.object_type == kTypePinned ||
      transaction->buffer.object_type == kTypeCatalog) {
    transaction->buffer.refcount = 1;
  } else {
    transaction->buffer.refcount = 0;
  }

  int64_t regular_size = regular_entries_.GetUsed();
  int64_t volatile_size = volatile_entries_.GetUsed();
  int64_t overrun = regular_size + volatile_size +
    transaction->buffer.size - max_size_;

  if (overrun > 0) {
    // if we're going to clean the cache, try to remove at least 25%
    overrun = max(overrun, (int64_t) max_size_>>2);
    perf::Inc(counters_.n_overrun);
    // evict volatile entries first, then fall back to regular entries
    volatile_entries_.ShrinkTo(max((int64_t) 0, volatile_size - overrun));
  }
  overrun -= volatile_size - volatile_entries_.GetUsed();
  if (overrun > 0) {
    regular_entries_.ShrinkTo(max((int64_t) 0, regular_size - overrun));
  }
  overrun -= regular_size - regular_entries_.GetUsed();
  if (overrun > 0) {
    LogCvmfs(kLogCache, kLogDebug,
             "transaction for %s would overrun the cache limit by %d",
             transaction->buffer.id.ToString().c_str(), overrun);
    perf::Inc(counters_.n_full);
    return -ENOSPC;
  }

  int rc = store->Commit(transaction->buffer);
  if (rc < 0) {
    LogCvmfs(kLogCache, kLogDebug,
             "commit on %s failed",
             transaction->buffer.id.ToString().c_str());
    return rc;
  }
  LogCvmfs(kLogCache, kLogDebug, "committed %s to cache",
           transaction->buffer.id.ToString().c_str());
  return 0;
}
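

For orientation, the functions covered above implement a transactional write path (StartTxn, Write, CommitTxn or OpenFromTxn) and a file-descriptor read path (Open, GetSize, Pread, Close). The sketch below is not part of the reported source; it is a minimal illustration of one round trip through that interface. It assumes the base-class helper SizeOfTxn() for sizing the caller-provided transaction area, the MemoryKvStore::kMallocLibc allocator constant, and the perf::Statistics / perf::StatisticsTemplate constructors; header paths are approximate and error handling is reduced to asserts.

  // Hypothetical usage sketch (not part of cache_ram.cc): write an object into
  // the RAM cache through a transaction, then read it back via the fd interface.
  #include <cassert>
  #include <string>
  #include <vector>

  #include "cache_ram.h"
  #include "hash.h"        // assumed header for shash::Any
  #include "statistics.h"  // assumed header for perf::Statistics

  void RamCacheRoundTrip() {
    perf::Statistics statistics;
    RamCacheManager cache(
        64 * 1024 * 1024,                        // max_size: 64 MB
        1024,                                    // max number of open handles
        MemoryKvStore::kMallocLibc,              // assumed allocator value
        perf::StatisticsTemplate("cache", &statistics));

    const std::string payload = "hello cache";
    shash::Any id(shash::kSha1);  // placeholder id; normally the content hash

    // The caller provides the transaction memory; StartTxn() placement-news
    // a Transaction object into it.  SizeOfTxn() is assumed from CacheManager.
    std::vector<char> txn(cache.SizeOfTxn());
    assert(cache.StartTxn(id, payload.size(), txn.data()) == 0);
    assert(cache.Write(payload.data(), payload.size(), txn.data()) ==
           static_cast<int64_t>(payload.size()));

    // OpenFromTxn() commits the buffer to the kv-store and opens it in one step.
    int fd = cache.OpenFromTxn(txn.data());
    assert(fd >= 0);

    char buf[32];
    int64_t nbytes = cache.Pread(fd, buf, sizeof(buf), 0);
    assert(nbytes == static_cast<int64_t>(payload.size()));
    assert(cache.Close(fd) == 0);
  }

OpenFromTxn() is used here instead of CommitTxn() followed by Open() so the sketch does not have to construct a BlessedObject, which is defined outside this file.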