static uint32_t hasher_uint64t(const uint64_t &value) {
  // Hash the 64-bit key (a file handle or inode) with MurmurHash2.
  return MurmurHash2(&value, sizeof(value), 0x07387a4f);
}
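This hasher is what the SmallHashDynamic tables listed at the end of this page are keyed with. A sketch of how it would be wired in, assuming the usual SmallHashDynamic::Init(expected_size, empty_key, hasher) convention; the initial size 16 and the empty key 0 are assumptions of this example, not taken from the excerpt:

// Sketch (assumed Init signature): register hasher_uint64t with the tables
// that map 64-bit handles and inodes to their chunk state.
SmallHashDynamic<uint64_t, ChunkFd> handle2fd;
SmallHashDynamic<uint64_t, FileChunkReflist> inode2chunks;
SmallHashDynamic<uint64_t, uint32_t> inode2references;

handle2fd.Init(16, 0, hasher_uint64t);          // 16: expected size, 0: empty key
inode2chunks.Init(16, 0, hasher_uint64t);
inode2references.Init(16, 0, hasher_uint64t);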
// FindChunkIdx(): binary search for the chunk covering byte offset 'off'.
assert(list && (list->size() > 0));
unsigned idx_low = 0;
unsigned idx_high = list->size() - 1;
unsigned chunk_idx = idx_high / 2;
while (idx_low < idx_high) {
  if (static_cast<uint64_t>(list->AtPtr(chunk_idx)->offset()) > off) {
    idx_high = chunk_idx - 1;
  } else {
    if ((chunk_idx == list->size() - 1) ||
        (static_cast<uint64_t>(list->AtPtr(chunk_idx + 1)->offset()) > off)) {
      break;
    }
    idx_low = chunk_idx + 1;
  }
  chunk_idx = idx_low + (idx_high - idx_low) / 2;
}
return chunk_idx;
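For context, a hedged sketch of how this lookup is typically used on a read path; LocateRead is a hypothetical helper introduced for illustration, not a function from the source:

// Illustrative sketch: map a read at byte offset 'off' to the chunk serving it.
uint64_t LocateRead(FileChunkReflist &reflist, const uint64_t off) {
  unsigned chunk_idx = reflist.FindChunkIdx(off);
  // The read starts this many bytes into chunk 'chunk_idx':
  return off - static_cast<uint64_t>(reflist.list->AtPtr(chunk_idx)->offset());
}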
// HashChunkList(): walk over every chunk in the reference list.
for (unsigned i = 0; i < list->size(); ++i) {
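Given the shash Init/Update/Final declarations referenced at the end of this page, this loop plausibly folds every chunk's content hash into one running digest. A hedged sketch of that pattern; the algorithm choice (kSha1), the content_hash() accessor, and the alloca'd context buffer are assumptions of this example:

// Sketch (assumptions noted above): combine all chunk content hashes
// into a single digest via the streaming shash interface.
shash::Any digest(shash::kSha1);
shash::ContextPtr ctx(shash::kSha1);
ctx.buffer = alloca(ctx.size);          // scratch space for the hash context
shash::Init(ctx);
for (unsigned i = 0; i < list->size(); ++i) {
  // Feed the raw digest bytes of chunk i into the running context.
  shash::Update(list->AtPtr(i)->content_hash().digest,
                shash::kDigestSizes[shash::kSha1], ctx);
}
shash::Final(ctx, &digest);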
// ChunkTables constructor: allocate and initialize the table-wide lock.
lock =
    reinterpret_cast<pthread_mutex_t *>(smalloc(sizeof(pthread_mutex_t)));
int retval = pthread_mutex_init(lock, NULL);
assert(retval == 0);
// Create the array of fine-grained locks, one per handle bucket.
for (unsigned i = 0; i < kNumHandleLocks; ++i) {
  pthread_mutex_t *m =
      reinterpret_cast<pthread_mutex_t *>(smalloc(sizeof(pthread_mutex_t)));
  int retval = pthread_mutex_init(m, NULL);
  assert(retval == 0);
  handle_locks.PushBack(m);
}
// ChunkTables destructor: tear down the table-wide lock and all handle locks.
pthread_mutex_destroy(lock);
free(lock);
for (unsigned i = 0; i < kNumHandleLocks; ++i) {
  pthread_mutex_destroy(handle_locks.At(i));
  free(handle_locks.At(i));
}
// CopyFrom(): reset the lookup tables before copying from 'other'.
handle2uniqino.Clear();
handle2fd.Clear();
inode2chunks.Clear();
inode2references.Clear();
// Handle2Lock(): map the handle's 32-bit hash onto one of the handle locks.
const uint32_t hash = hasher_uint64t(handle);
const double bucket =
    static_cast<double>(hash) * static_cast<double>(kNumHandleLocks) /
    static_cast<double>((uint32_t)(-1));
return handle_locks.At((uint32_t)bucket % kNumHandleLocks);
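The scaling above spreads the 32-bit hash values evenly over the kNumHandleLocks buckets. A small self-contained illustration of the same arithmetic; the value of kNumHandleLocks here is an assumption made only for the example:

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned kNumHandleLocks = 128;   // assumed value, for illustration only
  const uint32_t hash = 0x9e3779b9;       // an arbitrary 32-bit hash value
  // Scale the hash from [0, 2^32) onto [0, kNumHandleLocks).
  const double bucket = static_cast<double>(hash) *
                        static_cast<double>(kNumHandleLocks) /
                        static_cast<double>(static_cast<uint32_t>(-1));
  printf("hash %u -> lock bucket %u\n", hash,
         static_cast<uint32_t>(bucket) % kNumHandleLocks);
  return 0;
}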
// Allocate and initialize the single lock protecting fd_table_.
lock_ =
    reinterpret_cast<pthread_mutex_t *>(smalloc(sizeof(pthread_mutex_t)));
int retval = pthread_mutex_init(lock_, NULL);
assert(retval == 0);
// Destructor: release all remaining chunk lists, then the lock itself.
for (unsigned i = 0; i < fd_table_.size(); ++i) {
  delete fd_table_[i].chunk_reflist.list;
}
pthread_mutex_destroy(lock_);
free(lock_);
// Add(): reuse the first free slot in fd_table_, otherwise append a new one.
for (; i < fd_table_.size(); ++i) {
  if (fd_table_[i].chunk_reflist.list == NULL) {
    fd_table_[i] = new_entry;
    return i;
  }
}
fd_table_.push_back(new_entry);
// Look up the table entry for descriptor 'fd'; out-of-range yields no entry.
unsigned idx = static_cast<unsigned>(fd);
if (idx < fd_table_.size())
  result = fd_table_[idx];
// Release the slot belonging to descriptor 'fd' and shrink the table from
// the back while the trailing entries are free (lock handling elided here).
unsigned idx = static_cast<unsigned>(fd);
if (idx >= fd_table_.size()) {
  return;
}
delete fd_table_[idx].chunk_reflist.list;
fd_table_[idx].chunk_reflist.list = NULL;
fd_table_[idx].chunk_reflist.path.Assign("", 0);
delete fd_table_[idx].chunk_fd;
fd_table_[idx].chunk_fd = NULL;
while (!fd_table_.empty() && (fd_table_.back().chunk_reflist.list == NULL)) {
  fd_table_.pop_back();
}
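The Add and Release fragments above implement a classic descriptor table: Add() reuses the lowest free slot or appends a new one, and Release() frees a slot and trims free slots from the back. A small self-contained sketch of that policy with a plain std::vector; all names here are illustrative, not from the source:

#include <cstddef>
#include <cstdio>
#include <vector>

// Minimal descriptor table: a slot is "free" when it holds NULL.
static std::vector<const char *> table;

int Add(const char *payload) {
  for (unsigned i = 0; i < table.size(); ++i) {
    if (table[i] == NULL) { table[i] = payload; return i; }  // reuse a free slot
  }
  table.push_back(payload);                 // otherwise grow the table
  return table.size() - 1;
}

void Release(int fd) {
  if (fd < 0 || static_cast<unsigned>(fd) >= table.size()) return;
  table[fd] = NULL;                         // mark the slot free
  while (!table.empty() && table.back() == NULL)
    table.pop_back();                       // shrink from the back
}

int main() {
  int a = Add("chunks of /a");              // -> 0
  int b = Add("chunks of /b");              // -> 1
  Release(a);
  int c = Add("chunks of /c");              // reuses slot 0
  printf("a=%d b=%d c=%d size=%zu\n", a, b, c, table.size());
  return 0;
}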
Referenced declarations:

struct cvmcache_context *ctx
FileChunkReflist chunk_reflist
SmallHashDynamic<uint64_t, uint64_t> handle2uniqino
assert((mem || (size == 0)) && "Out Of Memory")
static uint32_t hasher_uint64t(const uint64_t &value)
SmallHashDynamic<uint64_t, ChunkFd> handle2fd
void Init(ContextPtr context)
pthread_mutex_t *Handle2Lock(const uint64_t handle) const
unsigned FindChunkIdx(const uint64_t offset)
void Final(ContextPtr context, Any *any_digest)
SmallHashDynamic<uint64_t, uint32_t> inode2references
ChunkTables &operator=(const ChunkTables &other)
int Add(FileChunkReflist chunks)
void Update(const unsigned char *buffer, const unsigned buffer_length, ContextPtr context)
shash::Any HashChunkList()
void CopyFrom(const ChunkTables &other)
const unsigned kDigestSizes[]
SmallHashDynamic<uint64_t, FileChunkReflist> inode2chunks
uint32_t MurmurHash2(const void *key, int len, uint32_t seed)