leveldb 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +22 -0
- data/README.md +95 -0
- data/ext/Rakefile +11 -0
- data/ext/leveldb/LICENSE +27 -0
- data/ext/leveldb/Makefile +206 -0
- data/ext/leveldb/build_config.mk +13 -0
- data/ext/leveldb/db/builder.cc +88 -0
- data/ext/leveldb/db/builder.h +34 -0
- data/ext/leveldb/db/c.cc +595 -0
- data/ext/leveldb/db/c_test.c +390 -0
- data/ext/leveldb/db/corruption_test.cc +359 -0
- data/ext/leveldb/db/db_bench.cc +979 -0
- data/ext/leveldb/db/db_impl.cc +1485 -0
- data/ext/leveldb/db/db_impl.h +203 -0
- data/ext/leveldb/db/db_iter.cc +299 -0
- data/ext/leveldb/db/db_iter.h +26 -0
- data/ext/leveldb/db/db_test.cc +2092 -0
- data/ext/leveldb/db/dbformat.cc +140 -0
- data/ext/leveldb/db/dbformat.h +227 -0
- data/ext/leveldb/db/dbformat_test.cc +112 -0
- data/ext/leveldb/db/filename.cc +139 -0
- data/ext/leveldb/db/filename.h +80 -0
- data/ext/leveldb/db/filename_test.cc +122 -0
- data/ext/leveldb/db/leveldb_main.cc +238 -0
- data/ext/leveldb/db/log_format.h +35 -0
- data/ext/leveldb/db/log_reader.cc +259 -0
- data/ext/leveldb/db/log_reader.h +108 -0
- data/ext/leveldb/db/log_test.cc +500 -0
- data/ext/leveldb/db/log_writer.cc +103 -0
- data/ext/leveldb/db/log_writer.h +48 -0
- data/ext/leveldb/db/memtable.cc +145 -0
- data/ext/leveldb/db/memtable.h +91 -0
- data/ext/leveldb/db/repair.cc +389 -0
- data/ext/leveldb/db/skiplist.h +379 -0
- data/ext/leveldb/db/skiplist_test.cc +378 -0
- data/ext/leveldb/db/snapshot.h +66 -0
- data/ext/leveldb/db/table_cache.cc +121 -0
- data/ext/leveldb/db/table_cache.h +61 -0
- data/ext/leveldb/db/version_edit.cc +266 -0
- data/ext/leveldb/db/version_edit.h +107 -0
- data/ext/leveldb/db/version_edit_test.cc +46 -0
- data/ext/leveldb/db/version_set.cc +1443 -0
- data/ext/leveldb/db/version_set.h +383 -0
- data/ext/leveldb/db/version_set_test.cc +179 -0
- data/ext/leveldb/db/write_batch.cc +147 -0
- data/ext/leveldb/db/write_batch_internal.h +49 -0
- data/ext/leveldb/db/write_batch_test.cc +120 -0
- data/ext/leveldb/doc/bench/db_bench_sqlite3.cc +718 -0
- data/ext/leveldb/doc/bench/db_bench_tree_db.cc +528 -0
- data/ext/leveldb/helpers/memenv/memenv.cc +384 -0
- data/ext/leveldb/helpers/memenv/memenv.h +20 -0
- data/ext/leveldb/helpers/memenv/memenv_test.cc +232 -0
- data/ext/leveldb/include/leveldb/c.h +291 -0
- data/ext/leveldb/include/leveldb/cache.h +99 -0
- data/ext/leveldb/include/leveldb/comparator.h +63 -0
- data/ext/leveldb/include/leveldb/db.h +161 -0
- data/ext/leveldb/include/leveldb/env.h +333 -0
- data/ext/leveldb/include/leveldb/filter_policy.h +70 -0
- data/ext/leveldb/include/leveldb/iterator.h +100 -0
- data/ext/leveldb/include/leveldb/options.h +195 -0
- data/ext/leveldb/include/leveldb/slice.h +109 -0
- data/ext/leveldb/include/leveldb/status.h +106 -0
- data/ext/leveldb/include/leveldb/table.h +85 -0
- data/ext/leveldb/include/leveldb/table_builder.h +92 -0
- data/ext/leveldb/include/leveldb/write_batch.h +64 -0
- data/ext/leveldb/issues/issue178_test.cc +92 -0
- data/ext/leveldb/port/atomic_pointer.h +224 -0
- data/ext/leveldb/port/port.h +19 -0
- data/ext/leveldb/port/port_example.h +135 -0
- data/ext/leveldb/port/port_posix.cc +54 -0
- data/ext/leveldb/port/port_posix.h +157 -0
- data/ext/leveldb/port/thread_annotations.h +59 -0
- data/ext/leveldb/port/win/stdint.h +24 -0
- data/ext/leveldb/table/block.cc +268 -0
- data/ext/leveldb/table/block.h +44 -0
- data/ext/leveldb/table/block_builder.cc +109 -0
- data/ext/leveldb/table/block_builder.h +57 -0
- data/ext/leveldb/table/filter_block.cc +111 -0
- data/ext/leveldb/table/filter_block.h +68 -0
- data/ext/leveldb/table/filter_block_test.cc +128 -0
- data/ext/leveldb/table/format.cc +145 -0
- data/ext/leveldb/table/format.h +108 -0
- data/ext/leveldb/table/iterator.cc +67 -0
- data/ext/leveldb/table/iterator_wrapper.h +63 -0
- data/ext/leveldb/table/merger.cc +197 -0
- data/ext/leveldb/table/merger.h +26 -0
- data/ext/leveldb/table/table.cc +275 -0
- data/ext/leveldb/table/table_builder.cc +270 -0
- data/ext/leveldb/table/table_test.cc +868 -0
- data/ext/leveldb/table/two_level_iterator.cc +182 -0
- data/ext/leveldb/table/two_level_iterator.h +34 -0
- data/ext/leveldb/util/arena.cc +68 -0
- data/ext/leveldb/util/arena.h +68 -0
- data/ext/leveldb/util/arena_test.cc +68 -0
- data/ext/leveldb/util/bloom.cc +95 -0
- data/ext/leveldb/util/bloom_test.cc +160 -0
- data/ext/leveldb/util/cache.cc +325 -0
- data/ext/leveldb/util/cache_test.cc +186 -0
- data/ext/leveldb/util/coding.cc +194 -0
- data/ext/leveldb/util/coding.h +104 -0
- data/ext/leveldb/util/coding_test.cc +196 -0
- data/ext/leveldb/util/comparator.cc +81 -0
- data/ext/leveldb/util/crc32c.cc +332 -0
- data/ext/leveldb/util/crc32c.h +45 -0
- data/ext/leveldb/util/crc32c_test.cc +72 -0
- data/ext/leveldb/util/env.cc +96 -0
- data/ext/leveldb/util/env_posix.cc +698 -0
- data/ext/leveldb/util/env_test.cc +104 -0
- data/ext/leveldb/util/filter_policy.cc +11 -0
- data/ext/leveldb/util/hash.cc +52 -0
- data/ext/leveldb/util/hash.h +19 -0
- data/ext/leveldb/util/histogram.cc +139 -0
- data/ext/leveldb/util/histogram.h +42 -0
- data/ext/leveldb/util/logging.cc +81 -0
- data/ext/leveldb/util/logging.h +47 -0
- data/ext/leveldb/util/mutexlock.h +41 -0
- data/ext/leveldb/util/options.cc +29 -0
- data/ext/leveldb/util/posix_logger.h +98 -0
- data/ext/leveldb/util/random.h +59 -0
- data/ext/leveldb/util/status.cc +75 -0
- data/ext/leveldb/util/testharness.cc +77 -0
- data/ext/leveldb/util/testharness.h +138 -0
- data/ext/leveldb/util/testutil.cc +51 -0
- data/ext/leveldb/util/testutil.h +53 -0
- data/lib/leveldb/version.rb +3 -0
- data/lib/leveldb.rb +1006 -0
- metadata +228 -0
|
@@ -0,0 +1,325 @@
|
|
|
1
|
+
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
2
|
+
// Use of this source code is governed by a BSD-style license that can be
|
|
3
|
+
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
4
|
+
|
|
5
|
+
#include <assert.h>
|
|
6
|
+
#include <stdio.h>
|
|
7
|
+
#include <stdlib.h>
|
|
8
|
+
|
|
9
|
+
#include "leveldb/cache.h"
|
|
10
|
+
#include "port/port.h"
|
|
11
|
+
#include "util/hash.h"
|
|
12
|
+
#include "util/mutexlock.h"
|
|
13
|
+
|
|
14
|
+
namespace leveldb {
|
|
15
|
+
|
|
16
|
+
// Out-of-line definition anchors the Cache vtable in this translation unit.
Cache::~Cache() {
}
|
|
18
|
+
|
|
19
|
+
namespace {
|
|
20
|
+
|
|
21
|
+
// LRU cache implementation
|
|
22
|
+
|
|
23
|
+
// An entry is a variable length heap-allocated structure.  Entries
// are kept in a circular doubly linked list ordered by access time.
struct LRUHandle {
  void* value;                                 // Opaque user value.
  void (*deleter)(const Slice&, void* value);  // Called when refs drops to 0.
  LRUHandle* next_hash;  // Next entry in the same HandleTable bucket chain.
  LRUHandle* next;       // Next (newer) entry in the circular LRU list.
  LRUHandle* prev;       // Previous (older) entry in the circular LRU list.
  size_t charge;      // TODO(opt): Only allow uint32_t?
  size_t key_length;  // Number of bytes stored starting at key_data.
  uint32_t refs;      // Reference count: cache's own ref + client handles.
  uint32_t hash;      // Hash of key(); used for fast sharding and comparisons
  char key_data[1];   // Beginning of key

  Slice key() const {
    // For cheaper lookups, we allow a temporary Handle object
    // to store a pointer to a key in "value".
    if (next == this) {
      return *(reinterpret_cast<Slice*>(value));
    } else {
      return Slice(key_data, key_length);
    }
  }
};
|
|
47
|
+
|
|
48
|
+
// We provide our own simple hash table since it removes a whole bunch
// of porting hacks and is also faster than some of the built-in hash
// table implementations in some of the compiler/runtime combinations
// we have tested.  E.g., readrandom speeds up by ~5% over the g++
// 4.4.3's builtin hashtable.
class HandleTable {
 public:
  HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); }
  ~HandleTable() { delete[] list_; }

  // Returns the entry matching key/hash, or NULL if absent.
  LRUHandle* Lookup(const Slice& key, uint32_t hash) {
    return *FindPointer(key, hash);
  }

  // Inserts h, replacing any existing entry with the same key/hash.
  // Returns the displaced entry (ownership passes to the caller), or NULL.
  LRUHandle* Insert(LRUHandle* h) {
    LRUHandle** ptr = FindPointer(h->key(), h->hash);
    LRUHandle* old = *ptr;
    h->next_hash = (old == NULL ? NULL : old->next_hash);
    *ptr = h;
    if (old == NULL) {
      ++elems_;
      if (elems_ > length_) {
        // Since each cache entry is fairly large, we aim for a small
        // average linked list length (<= 1).
        Resize();
      }
    }
    return old;
  }

  // Unlinks and returns the entry matching key/hash, or NULL if absent.
  // Does not free the entry; the caller owns it afterwards.
  LRUHandle* Remove(const Slice& key, uint32_t hash) {
    LRUHandle** ptr = FindPointer(key, hash);
    LRUHandle* result = *ptr;
    if (result != NULL) {
      *ptr = result->next_hash;
      --elems_;
    }
    return result;
  }

 private:
  // The table consists of an array of buckets where each bucket is
  // a linked list of cache entries that hash into the bucket.
  uint32_t length_;   // Number of buckets; kept a power of two by Resize().
  uint32_t elems_;    // Total entries across all buckets.
  LRUHandle** list_;  // Bucket array.

  // Return a pointer to slot that points to a cache entry that
  // matches key/hash.  If there is no such cache entry, return a
  // pointer to the trailing slot in the corresponding linked list.
  LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
    // length_ is a power of two, so the mask selects the bucket.
    LRUHandle** ptr = &list_[hash & (length_ - 1)];
    while (*ptr != NULL &&
           ((*ptr)->hash != hash || key != (*ptr)->key())) {
      ptr = &(*ptr)->next_hash;
    }
    return ptr;
  }

  // Grow the bucket array to the smallest power of two >= elems_
  // (minimum 4) and rehash every entry into it.
  void Resize() {
    uint32_t new_length = 4;
    while (new_length < elems_) {
      new_length *= 2;
    }
    LRUHandle** new_list = new LRUHandle*[new_length];
    memset(new_list, 0, sizeof(new_list[0]) * new_length);
    uint32_t count = 0;
    for (uint32_t i = 0; i < length_; i++) {
      LRUHandle* h = list_[i];
      while (h != NULL) {
        LRUHandle* next = h->next_hash;
        uint32_t hash = h->hash;
        LRUHandle** ptr = &new_list[hash & (new_length - 1)];
        h->next_hash = *ptr;
        *ptr = h;
        h = next;
        count++;
      }
    }
    assert(elems_ == count);
    delete[] list_;
    list_ = new_list;
    length_ = new_length;
  }
};
|
|
133
|
+
|
|
134
|
+
// A single shard of sharded cache.
class LRUCache {
 public:
  LRUCache();
  ~LRUCache();

  // Separate from constructor so caller can easily make an array of LRUCache
  void SetCapacity(size_t capacity) { capacity_ = capacity; }

  // Like Cache methods, but with an extra "hash" parameter.
  Cache::Handle* Insert(const Slice& key, uint32_t hash,
                        void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value));
  Cache::Handle* Lookup(const Slice& key, uint32_t hash);
  void Release(Cache::Handle* handle);
  void Erase(const Slice& key, uint32_t hash);

 private:
  void LRU_Remove(LRUHandle* e);
  void LRU_Append(LRUHandle* e);
  void Unref(LRUHandle* e);

  // Initialized before use.
  size_t capacity_;

  // mutex_ protects the following state.
  port::Mutex mutex_;
  size_t usage_;  // Sum of charges of entries currently held by the cache.

  // Dummy head of LRU list.
  // lru.prev is newest entry, lru.next is oldest entry.
  LRUHandle lru_;

  HandleTable table_;
};
|
|
169
|
+
|
|
170
|
+
// Starts with zero usage and an empty LRU list; capacity is set later
// via SetCapacity() so callers can build arrays of shards.
LRUCache::LRUCache()
    : usage_(0) {
  // Empty circular list: the dummy head points at itself both ways.
  lru_.prev = &lru_;
  lru_.next = &lru_;
}
|
|
176
|
+
|
|
177
|
+
// Drops the cache's own reference on every remaining entry.  All client
// handles must already have been released.
LRUCache::~LRUCache() {
  LRUHandle* e = lru_.next;
  while (e != &lru_) {
    LRUHandle* successor = e->next;
    assert(e->refs == 1);  // Error if caller has an unreleased handle
    Unref(e);
    e = successor;
  }
}
|
|
185
|
+
|
|
186
|
+
// Drops one reference; when the count reaches zero the entry's charge is
// subtracted from usage_, its deleter is invoked, and its storage freed.
void LRUCache::Unref(LRUHandle* e) {
  assert(e->refs > 0);
  if (--e->refs == 0) {
    usage_ -= e->charge;
    (*e->deleter)(e->key(), e->value);
    free(e);  // Entry was malloc'd as one chunk in Insert().
  }
}
|
|
195
|
+
|
|
196
|
+
// Unlinks "e" from the circular LRU list (does not touch refs or table_).
void LRUCache::LRU_Remove(LRUHandle* e) {
  LRUHandle* before = e->prev;
  LRUHandle* after = e->next;
  before->next = after;
  after->prev = before;
}
|
|
200
|
+
|
|
201
|
+
// Links "e" just before the dummy head, making it the newest entry.
void LRUCache::LRU_Append(LRUHandle* e) {
  LRUHandle* newest = lru_.prev;
  e->prev = newest;
  e->next = &lru_;
  newest->next = e;
  lru_.prev = e;
}
|
|
208
|
+
|
|
209
|
+
// Finds the entry for key/hash.  On a hit the entry gains a reference
// (caller must Release) and moves to the newest end of the LRU list.
// Returns NULL on a miss.
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
  MutexLock l(&mutex_);
  LRUHandle* found = table_.Lookup(key, hash);
  if (found == NULL) {
    return NULL;
  }
  found->refs++;
  LRU_Remove(found);
  LRU_Append(found);
  return reinterpret_cast<Cache::Handle*>(found);
}
|
|
219
|
+
|
|
220
|
+
// Returns a handle obtained from Insert()/Lookup(); may destroy the
// underlying entry if this was the last reference.
void LRUCache::Release(Cache::Handle* handle) {
  LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
  MutexLock l(&mutex_);
  Unref(e);
}
|
|
224
|
+
|
|
225
|
+
// Inserts key->value with the given charge, returning a handle the caller
// must Release().  Replaces any existing entry for the key and evicts cold
// entries until usage_ fits capacity_ again.
Cache::Handle* LRUCache::Insert(
    const Slice& key, uint32_t hash, void* value, size_t charge,
    void (*deleter)(const Slice& key, void* value)) {
  MutexLock l(&mutex_);

  // Handle and key bytes share one allocation; key_data[1] already
  // accounts for one byte, hence the -1.
  LRUHandle* e = reinterpret_cast<LRUHandle*>(
      malloc(sizeof(LRUHandle)-1 + key.size()));
  // NOTE(review): malloc result is unchecked; an OOM here dereferences
  // NULL below -- confirm crash-on-OOM is the intended policy.
  e->value = value;
  e->deleter = deleter;
  e->charge = charge;
  e->key_length = key.size();
  e->hash = hash;
  e->refs = 2;  // One from LRUCache, one for the returned handle
  memcpy(e->key_data, key.data(), key.size());
  LRU_Append(e);
  usage_ += charge;

  // Displace any existing entry for this key; it loses the cache's
  // reference and dies once outstanding client handles are released.
  LRUHandle* old = table_.Insert(e);
  if (old != NULL) {
    LRU_Remove(old);
    Unref(old);
  }

  // Evict from the cold end (lru_.next) until we are back under capacity.
  // The freshly inserted entry is at the hot end, so it survives unless
  // it alone exceeds capacity.
  while (usage_ > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    LRU_Remove(old);
    table_.Remove(old->key(), old->hash);
    Unref(old);
  }

  return reinterpret_cast<Cache::Handle*>(e);
}
|
|
257
|
+
|
|
258
|
+
void LRUCache::Erase(const Slice& key, uint32_t hash) {
|
|
259
|
+
MutexLock l(&mutex_);
|
|
260
|
+
LRUHandle* e = table_.Remove(key, hash);
|
|
261
|
+
if (e != NULL) {
|
|
262
|
+
LRU_Remove(e);
|
|
263
|
+
Unref(e);
|
|
264
|
+
}
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
static const int kNumShardBits = 4;
static const int kNumShards = 1 << kNumShardBits;

// The public Cache implementation: spreads entries over kNumShards
// independent LRUCache shards (selected from the top bits of the key
// hash) so concurrent callers rarely contend on the same shard mutex.
class ShardedLRUCache : public Cache {
 private:
  LRUCache shard_[kNumShards];
  port::Mutex id_mutex_;  // Guards last_id_ only.
  uint64_t last_id_;

  static inline uint32_t HashSlice(const Slice& s) {
    return Hash(s.data(), s.size(), 0);
  }

  // Uses the top kNumShardBits bits of the hash to pick a shard.
  static uint32_t Shard(uint32_t hash) {
    return hash >> (32 - kNumShardBits);
  }

 public:
  explicit ShardedLRUCache(size_t capacity)
      : last_id_(0) {
    // Round up so the shard capacities sum to at least "capacity".
    const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
    for (int s = 0; s < kNumShards; s++) {
      shard_[s].SetCapacity(per_shard);
    }
  }
  virtual ~ShardedLRUCache() { }
  virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                         void (*deleter)(const Slice& key, void* value)) {
    const uint32_t hash = HashSlice(key);
    return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
  }
  virtual Handle* Lookup(const Slice& key) {
    const uint32_t hash = HashSlice(key);
    return shard_[Shard(hash)].Lookup(key, hash);
  }
  virtual void Release(Handle* handle) {
    // Route back to the owning shard using the hash stored in the entry.
    LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
    shard_[Shard(h->hash)].Release(handle);
  }
  virtual void Erase(const Slice& key) {
    const uint32_t hash = HashSlice(key);
    shard_[Shard(hash)].Erase(key, hash);
  }
  virtual void* Value(Handle* handle) {
    return reinterpret_cast<LRUHandle*>(handle)->value;
  }
  virtual uint64_t NewId() {
    MutexLock l(&id_mutex_);
    return ++(last_id_);
  }
};
|
|
318
|
+
|
|
319
|
+
} // end anonymous namespace
|
|
320
|
+
|
|
321
|
+
// Factory declared in leveldb/cache.h: builds a cache of the given total
// capacity that shards requests across multiple internal LRU caches.
// Caller owns the returned object.
Cache* NewLRUCache(size_t capacity) {
  return new ShardedLRUCache(capacity);
}
|
|
324
|
+
|
|
325
|
+
} // namespace leveldb
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
2
|
+
// Use of this source code is governed by a BSD-style license that can be
|
|
3
|
+
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
4
|
+
|
|
5
|
+
#include "leveldb/cache.h"
|
|
6
|
+
|
|
7
|
+
#include <vector>
|
|
8
|
+
#include "util/coding.h"
|
|
9
|
+
#include "util/testharness.h"
|
|
10
|
+
|
|
11
|
+
namespace leveldb {
|
|
12
|
+
|
|
13
|
+
// Conversions between numeric keys/values and the types expected by Cache.
|
|
14
|
+
static std::string EncodeKey(int k) {
|
|
15
|
+
std::string result;
|
|
16
|
+
PutFixed32(&result, k);
|
|
17
|
+
return result;
|
|
18
|
+
}
|
|
19
|
+
// Inverse of EncodeKey: reads the fixed32 back out of a 4-byte slice.
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
|
|
23
|
+
// Values are stored directly in the cache's void* slot rather than on the
// heap, so the test Deleter never has to free them.
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
|
|
25
|
+
|
|
26
|
+
// Test fixture: the harness constructs a fresh instance per TEST, giving
// each test its own cache and deletion logs.
class CacheTest {
 public:
  // Most recently constructed fixture; lets the static Deleter callback
  // record deletions into that fixture's vectors.
  static CacheTest* current_;

  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  std::vector<int> deleted_keys_;    // Keys passed to Deleter, in order.
  std::vector<int> deleted_values_;  // Values passed to Deleter, in order.
  Cache* cache_;

  CacheTest() : cache_(NewLRUCache(kCacheSize)) {
    current_ = this;
  }

  ~CacheTest() {
    delete cache_;
  }

  // Returns the cached value for key, or -1 on a miss.
  int Lookup(int key) {
    Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
    const int r = (handle == NULL) ? -1 : DecodeValue(cache_->Value(handle));
    if (handle != NULL) {
      cache_->Release(handle);
    }
    return r;
  }

  // Inserts key->value and immediately releases the returned handle, so
  // only the cache itself keeps a reference.
  void Insert(int key, int value, int charge = 1) {
    cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
                                   &CacheTest::Deleter));
  }

  void Erase(int key) {
    cache_->Erase(EncodeKey(key));
  }
};
CacheTest* CacheTest::current_;
|
|
67
|
+
|
|
68
|
+
// Basic hit/miss behavior, including overwrite of an existing key.
TEST(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  // Overwriting key 100 must delete the old (100, 101) entry.
  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

// Erase removes exactly one entry and is a no-op on absent keys.
TEST(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  // Erasing again must not delete anything further.
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1, deleted_keys_.size());
}

// Entries with outstanding handles survive overwrite and erase; deletion
// happens only when the last handle is released.
TEST(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0, deleted_keys_.size());

  cache_->Release(h1);
  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1, deleted_keys_.size());

  cache_->Release(h2);
  ASSERT_EQ(2, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
}

// A frequently-touched entry stays resident while a cold one is evicted.
TEST(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize + 100; i++) {
    Insert(1000+i, 2000+i);
    ASSERT_EQ(2000+i, Lookup(1000+i));
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

// Mixed-weight entries: total retained charge stays near capacity.
TEST(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2*kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000+index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000+i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
}

// NewId must hand out distinct ids to successive callers.
TEST(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}
|
|
181
|
+
|
|
182
|
+
} // namespace leveldb
|
|
183
|
+
|
|
184
|
+
// Entry point: runs every TEST(...) registered above via the leveldb
// test harness.
int main(int argc, char** argv) {
  return leveldb::test::RunAllTests();
}
|
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
2
|
+
// Use of this source code is governed by a BSD-style license that can be
|
|
3
|
+
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
4
|
+
|
|
5
|
+
#include "util/coding.h"
|
|
6
|
+
|
|
7
|
+
namespace leveldb {
|
|
8
|
+
|
|
9
|
+
void EncodeFixed32(char* buf, uint32_t value) {
|
|
10
|
+
if (port::kLittleEndian) {
|
|
11
|
+
memcpy(buf, &value, sizeof(value));
|
|
12
|
+
} else {
|
|
13
|
+
buf[0] = value & 0xff;
|
|
14
|
+
buf[1] = (value >> 8) & 0xff;
|
|
15
|
+
buf[2] = (value >> 16) & 0xff;
|
|
16
|
+
buf[3] = (value >> 24) & 0xff;
|
|
17
|
+
}
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
void EncodeFixed64(char* buf, uint64_t value) {
|
|
21
|
+
if (port::kLittleEndian) {
|
|
22
|
+
memcpy(buf, &value, sizeof(value));
|
|
23
|
+
} else {
|
|
24
|
+
buf[0] = value & 0xff;
|
|
25
|
+
buf[1] = (value >> 8) & 0xff;
|
|
26
|
+
buf[2] = (value >> 16) & 0xff;
|
|
27
|
+
buf[3] = (value >> 24) & 0xff;
|
|
28
|
+
buf[4] = (value >> 32) & 0xff;
|
|
29
|
+
buf[5] = (value >> 40) & 0xff;
|
|
30
|
+
buf[6] = (value >> 48) & 0xff;
|
|
31
|
+
buf[7] = (value >> 56) & 0xff;
|
|
32
|
+
}
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
// Appends the 4-byte little-endian encoding of value to *dst.
void PutFixed32(std::string* dst, uint32_t value) {
  char encoded[sizeof(value)];
  EncodeFixed32(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
|
|
40
|
+
|
|
41
|
+
// Appends the 8-byte little-endian encoding of value to *dst.
void PutFixed64(std::string* dst, uint64_t value) {
  char encoded[sizeof(value)];
  EncodeFixed64(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
|
|
46
|
+
|
|
47
|
+
// Writes v as a base-128 varint (7 payload bits per byte, low groups
// first, high bit set on all but the last byte).  Returns one past the
// last byte written; at most 5 bytes for a 32-bit value.
char* EncodeVarint32(char* dst, uint32_t v) {
  // Operate on characters as unsigneds
  unsigned char* out = reinterpret_cast<unsigned char*>(dst);
  while (v >= 128) {
    *(out++) = (v & 127) | 128;  // continuation bit set
    v >>= 7;
  }
  *(out++) = static_cast<unsigned char>(v);
  return reinterpret_cast<char*>(out);
}
|
|
74
|
+
|
|
75
|
+
void PutVarint32(std::string* dst, uint32_t v) {
|
|
76
|
+
char buf[5];
|
|
77
|
+
char* ptr = EncodeVarint32(buf, v);
|
|
78
|
+
dst->append(buf, ptr - buf);
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
// 64-bit version of EncodeVarint32; at most 10 bytes.  Returns one past
// the last byte written.
char* EncodeVarint64(char* dst, uint64_t v) {
  unsigned char* out = reinterpret_cast<unsigned char*>(dst);
  do {
    unsigned char byte = v & 127;
    v >>= 7;
    if (v != 0) {
      byte |= 128;  // more groups follow
    }
    *(out++) = byte;
  } while (v != 0);
  return reinterpret_cast<char*>(out);
}
|
|
91
|
+
|
|
92
|
+
void PutVarint64(std::string* dst, uint64_t v) {
|
|
93
|
+
char buf[10];
|
|
94
|
+
char* ptr = EncodeVarint64(buf, v);
|
|
95
|
+
dst->append(buf, ptr - buf);
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
// Appends a varint32 length prefix followed by the raw bytes of value.
// NOTE(review): value.size() is a size_t narrowed to uint32_t by
// PutVarint32 -- slices of 4GB or more would be mis-encoded; confirm
// callers never pass such slices.
void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
  PutVarint32(dst, value.size());
  dst->append(value.data(), value.size());
}
|
|
102
|
+
|
|
103
|
+
// Returns the number of bytes (1..10) the varint encoding of v occupies.
int VarintLength(uint64_t v) {
  int size = 1;
  for (v >>= 7; v != 0; v >>= 7) {
    size++;
  }
  return size;
}
|
|
111
|
+
|
|
112
|
+
// Slow path for GetVarint32Ptr (multi-byte encodings).  Decodes a varint32
// from [p, limit); stores it in *value and returns the byte after it, or
// NULL if the input is truncated or longer than 5 groups.
const char* GetVarint32PtrFallback(const char* p,
                                   const char* limit,
                                   uint32_t* value) {
  uint32_t result = 0;
  uint32_t shift = 0;
  while (shift <= 28 && p < limit) {
    uint32_t byte = static_cast<unsigned char>(*p);
    p++;
    if (byte & 128) {
      // More bytes are present
      result |= ((byte & 127) << shift);
      shift += 7;
    } else {
      *value = result | (byte << shift);
      return p;
    }
  }
  return NULL;
}
|
|
130
|
+
|
|
131
|
+
// Decodes a varint32 from the front of *input, advancing the slice past
// it.  Returns false (input possibly partially consumed-state unchanged on
// the Slice only when the parse fails) if the data is malformed/truncated.
bool GetVarint32(Slice* input, uint32_t* value) {
  const char* start = input->data();
  const char* limit = start + input->size();
  const char* after = GetVarint32Ptr(start, limit, value);
  if (after == NULL) {
    return false;
  }
  *input = Slice(after, limit - after);
  return true;
}
|
|
142
|
+
|
|
143
|
+
// Decodes a varint64 from [p, limit); stores it in *value and returns the
// byte after it, or NULL if the input is truncated or over-long.
const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
  uint64_t result = 0;
  uint32_t shift = 0;
  while (shift <= 63 && p < limit) {
    uint64_t byte = static_cast<unsigned char>(*p);
    p++;
    if (byte & 128) {
      // More bytes are present
      result |= ((byte & 127) << shift);
      shift += 7;
    } else {
      *value = result | (byte << shift);
      return p;
    }
  }
  return NULL;
}
|
|
159
|
+
|
|
160
|
+
// Decodes a varint64 from the front of *input, advancing the slice past
// it.  Returns false if the data is malformed or truncated.
bool GetVarint64(Slice* input, uint64_t* value) {
  const char* start = input->data();
  const char* limit = start + input->size();
  const char* after = GetVarint64Ptr(start, limit, value);
  if (after == NULL) {
    return false;
  }
  *input = Slice(after, limit - after);
  return true;
}
|
|
171
|
+
|
|
172
|
+
const char* GetLengthPrefixedSlice(const char* p, const char* limit,
|
|
173
|
+
Slice* result) {
|
|
174
|
+
uint32_t len;
|
|
175
|
+
p = GetVarint32Ptr(p, limit, &len);
|
|
176
|
+
if (p == NULL) return NULL;
|
|
177
|
+
if (p + len > limit) return NULL;
|
|
178
|
+
*result = Slice(p, len);
|
|
179
|
+
return p + len;
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
// Parses a length-prefixed slice from the front of *input, advancing past
// it on success.  Returns false if the prefix is malformed or the payload
// is truncated (the varint prefix may already have been consumed then).
bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
  uint32_t len;
  if (!GetVarint32(input, &len)) {
    return false;
  }
  if (input->size() < len) {
    return false;
  }
  *result = Slice(input->data(), len);
  input->remove_prefix(len);
  return true;
}
|
|
193
|
+
|
|
194
|
+
} // namespace leveldb
|