GCC Code Coverage Report
Directory: cvmfs/
File:      cvmfs/malloc_heap.cc
Date:      2019-02-03 02:48:13

              Exec  Total  Coverage
Lines:          67     67   100.0 %
Branches:       22     32    68.8 %

Line      Exec  Source
   1            /**
   2             * This file is part of the CernVM File System.
   3             */
   4
   5            #include "cvmfs_config.h"
   6            #include "malloc_heap.h"
   7
   8            #include <cassert>
   9            #include <cstring>
  10            #include <new>
  11
  12            #include "smalloc.h"
  13
  14            using namespace std;  // NOLINT
  15
  16    106130  void *MallocHeap::Allocate(
  17              uint64_t size,
  18              void *header,
  19              unsigned header_size)
  20            {
  21    106130    assert(size > 0);
  22    106130    assert(header_size <= size);
  23    106130    uint64_t rounded_size = RoundUp8(size);
  24    106130    int64_t real_size = rounded_size + sizeof(Tag);
  25    106130    if (gauge_ + real_size > capacity_)
  26         4      return NULL;
  27
  28    106126    unsigned char *new_block = heap_ + gauge_;
  29    106126    new (new_block) Tag(rounded_size);
  30    106126    new_block += sizeof(Tag);
  31    106126    memcpy(new_block, header, header_size);
  32    106126    gauge_ += real_size;
  33    106126    stored_ += rounded_size;
  34    106126    num_blocks_++;
  35    106126    return new_block;
  36            }
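
(Worked example, assuming sizeof(Tag) is 8 bytes as the int64_t size field suggests; the header file is not shown here: a request for 30 bytes rounds up to a 32-byte payload, so real_size is 40. gauge_ advances by 40 bytes, stored_ grows by the 32 rounded bytes, the header_size bytes of header are copied to the start of the payload, and the returned pointer sits immediately behind the tag.)
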
  37
  38
  39         6  void MallocHeap::Compact() {
  40         6    if (gauge_ == 0)
  41         1      return;
  42
  43              // Not really a tag, just the top memory address
  44         5    Tag *heap_top = reinterpret_cast<Tag *>(heap_ + gauge_);
  45         5    Tag *current_tag = reinterpret_cast<Tag *>(heap_);
  46         5    Tag *next_tag = current_tag->JumpToNext();
  47              // Move a sliding window of two blocks over the heap and compact where
  48              // possible
  49    159467    while (next_tag < heap_top) {
  50    159457      if (current_tag->IsFree()) {
  51    105089        if (next_tag->IsFree()) {
  52                    // Adjacent free blocks, merge and try again
  53     51724          current_tag->size -= sizeof(Tag) + next_tag->GetSize();
  54     51724          next_tag = next_tag->JumpToNext();
  55                  } else {
  56                    // Free block followed by a reserved block, move memory and create a
  57                    // new free tag at the end of the moved block
  58     53365          int64_t free_space = current_tag->size;
  59     53365          current_tag->size = next_tag->size;
  60                    memmove(current_tag->GetBlock(),
  61     53365                  next_tag->GetBlock(), next_tag->GetSize());
  62     53365          (*callback_ptr_)(BlockPtr(current_tag->GetBlock()));
  63     53365          next_tag = current_tag->JumpToNext();
  64     53365          next_tag->size = free_space;
  65                  }
  66                } else {
  67                  // Current block allocated, move on
  68     54368        current_tag = next_tag;
  69     54368        next_tag = next_tag->JumpToNext();
  70                }
  71              }
  72
  73         5    gauge_ = (reinterpret_cast<unsigned char *>(current_tag) - heap_);
  74         5    if (!current_tag->IsFree())
  75         1      gauge_ += sizeof(Tag) + current_tag->GetSize();
  76            }
  77
  78
  79         2  void *MallocHeap::Expand(void *block, uint64_t new_size) {
  80         2    uint64_t old_size = GetSize(block);
  81         2    assert(old_size <= new_size);
  82         2    void *new_block = Allocate(new_size, block, old_size);
  83         2    if (new_block != NULL)
  84         2      MarkFree(block);
  85         2    return new_block;
  86            }
  87
  88
  89      2054  bool MallocHeap::HasSpaceFor(uint64_t nbytes) {
  90      2054    return RoundUp8(gauge_ + nbytes + sizeof(Tag)) <= capacity_;
  91            }
  92
  93
  94     51730  void MallocHeap::MarkFree(void *block) {
  95     51730    Tag *tag = reinterpret_cast<Tag *>(block) - 1;
  96     51730    assert(tag->size > 0);
  97     51730    tag->size = -(tag->size);
  98     51730    stored_ -= tag->GetSize();
  99     51730    num_blocks_--;
 100              // TODO(jblomer): if MarkFree() takes place at the top of the heap, one could
 101              // move back the gauge_ pointer.  Whether this is an optimization or unnecessary
 102              // extra work depends on how the MallocHeap is used.
 103     51730  }
 104
 105
 106     50300  uint64_t MallocHeap::GetSize(void *block) {
 107     50300    Tag *tag = reinterpret_cast<Tag *>(block) - 1;
 108     50300    assert(tag->size > 0);
 109     50300    return tag->size;
 110            }
 111
 112
 113        56  MallocHeap::MallocHeap(uint64_t capacity, CallbackPtr callback_ptr)
 114              : callback_ptr_(callback_ptr)
 115              , capacity_(capacity)
 116              , gauge_(0)
 117              , stored_(0)
 118        56    , num_blocks_(0)
 119            {
 120        56    assert(capacity_ > kMinCapacity);
 121              // Ensure 8-byte alignment
 122        56    assert((capacity_ % 8) == 0);
 123        56    heap_ = reinterpret_cast<unsigned char *>(sxmmap(capacity));
 124        56    assert(uintptr_t(heap_) % 8 == 0);
 125        56  }
 126
 127
 128        56  MallocHeap::~MallocHeap() {
 129        56    sxunmap(heap_, capacity_);
 130        56  }
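
The listing above is the complete implementation, but the interplay of the signed Tag, MarkFree(), and the moving branch of Compact() is easier to see on a toy arena. The following self-contained sketch is not from the cvmfs sources and all names in it are invented here; it walks one allocation/free/compaction cycle of the same tag scheme by hand. The pointer fix-up after the memmove is exactly what the (*callback_ptr_)(BlockPtr(...)) call in Compact() gives the MallocHeap owner a chance to do.

// Illustrative sketch only -- not part of malloc_heap.cc and not the cvmfs
// API.  It models the tag scheme in miniature: a signed boundary tag in
// front of each payload (positive = reserved, negative = free), a sign flip
// on free, and a memmove that slides a live block down over a free one.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct Tag {
  int64_t size;  // payload bytes following the tag; negative means free
  unsigned char *payload() {
    return reinterpret_cast<unsigned char *>(this + 1);
  }
};

int main() {
  std::vector<unsigned char> arena(1024, 0);
  uint64_t gauge = 0;  // bump pointer, like gauge_ in MallocHeap

  // "Allocate": write a tag, hand out the memory behind it, bump the gauge.
  Tag *tag_a = reinterpret_cast<Tag *>(&arena[gauge]);
  tag_a->size = 64;
  gauge += sizeof(Tag) + 64;
  Tag *tag_b = reinterpret_cast<Tag *>(&arena[gauge]);
  tag_b->size = 64;
  gauge += sizeof(Tag) + 64;
  unsigned char *b = tag_b->payload();
  memset(b, 0xab, 64);

  // "MarkFree" on the first block: flip the sign of its tag.
  tag_a->size = -tag_a->size;

  // One "Compact" step: a free block followed by a reserved block.  The
  // reserved payload slides down over the free one; the owner must then
  // update its pointer (MallocHeap does this via callback_ptr_).
  int64_t freed = -tag_a->size;           // 64 bytes reclaimed
  tag_a->size = tag_b->size;              // first tag now owns b's payload
  memmove(tag_a->payload(), b, static_cast<size_t>(tag_a->size));
  b = tag_a->payload();                   // pointer fix-up by the owner
  gauge -= sizeof(Tag) + freed;           // trailing space is free again

  assert(b[0] == 0xab && b[63] == 0xab);
  printf("compacted: gauge shrank to %llu bytes\n",
         static_cast<unsigned long long>(gauge));
  return 0;
}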