leveldb 0.1.3 → 0.1.4
- checksums.yaml +4 -4
- data/README.md +71 -2
- data/ext/leveldb/Makefile +5 -1
- data/ext/leveldb/db/autocompact_test.cc +118 -0
- data/ext/leveldb/db/corruption_test.cc +22 -29
- data/ext/leveldb/db/db_impl.cc +27 -14
- data/ext/leveldb/db/db_impl.h +8 -1
- data/ext/leveldb/db/db_iter.cc +29 -12
- data/ext/leveldb/db/db_iter.h +5 -3
- data/ext/leveldb/db/dbformat.h +3 -0
- data/ext/leveldb/db/version_set.cc +92 -4
- data/ext/leveldb/db/version_set.h +15 -0
- data/ext/leveldb/include/leveldb/db.h +1 -1
- data/ext/leveldb/util/env_posix.cc +32 -1
- data/ext/leveldb/util/random.h +6 -1
- data/lib/leveldb/db.rb +39 -17
- data/lib/leveldb/version.rb +1 -1
- metadata +4 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8ac9eef1bb10dc5b82a7ea8cc6f4c2d64cc5d04c
+  data.tar.gz: 04748952f04ef49c0c5622712ade7f4b3fc211e8
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 06f097cb8df8f2f5679dfced115ee73236caa4770aea85302d54c58dbf9d451982356e404cd3050ed8413df1bd7e4bd6379e5a7789ef2f5b81db48f216f40346
+  data.tar.gz: 2f846938f51b43632a5c9629ea618eccbcdabc42ccba33f63fe0e8a6ade6dbf026485c9461b8b99ab7cce1400135e15d1ba29311c58496963d90b99a9337dbf8
data/README.md
CHANGED
@@ -36,8 +36,9 @@ embedded database. LevelDB is a persistent ordered map.
 $ brew install snappy
 $ git clone git://github.com/DAddYE/leveldb.git
 $ cd leveldb
-$
-$ rake
+$ bundle install
+$ bundle exec rake compile
+$ bundle exec rake console
 
 ### Standard
 
@@ -130,6 +131,74 @@ db.read_property('leveldb.stats')
 db.stats
 ```
 
+## Benchmarks
+
+_Preface_: these numbers are only for general reference; I know that [zedshaw](http://zedshaw.com/essays/programmer_stats.html)
+will kill me for this, but ... on my mac:
+
+    Model Identifier:      MacBookPro10,1
+    Processor Name:        Intel Core i7
+    Processor Speed:       2.3 GHz
+    Number of Processors:  1
+    Total Number of Cores: 4
+    L2 Cache (per Core):   256 KB
+    L3 Cache:              6 MB
+    Memory:                8 GB
+
+The benchmark code is in [benchmark/leveldb.rb](/benchmark/leveldb.rb).
+
+Writing/Reading `100mb` of _very_ random data of `10kb` each:
+
+### Without compression:
+
+              user     system      total        real
+    put   0.530000   0.310000   0.840000 (  1.420387)
+    get   0.800000   0.460000   1.260000 (  2.626631)
+
+    Level  Files Size(MB) Time(sec) Read(MB) Write(MB)
+    --------------------------------------------------
+      0        1        0         0        0         0
+      2       50       98         0        0         0
+      3        1        2         0        0         0
+
+### With compression:
+
+              user     system      total        real
+    put   0.850000   0.320000   1.170000 (  1.721609)
+    get   1.160000   0.480000   1.640000 (  2.703543)
+
+    Level  Files Size(MB) Time(sec) Read(MB) Write(MB)
+    --------------------------------------------------
+      0        1        0         0        0         0
+      1        5       10         0        0         0
+      2       45       90         0        0         0
+
+**NOTE**: as you can see, `snappy` can't compress this kind of _very, very_
+random data, but the point was not to benchmark snappy as a compressor, only
+to see how much _slower_ using it would be. As you can see: only a _little_,
+and on normal _data_ the db size will be much, much better!
+
+### With batch:
+
+              user     system      total        real
+    put   0.260000   0.170000   0.430000 (  0.433407)
+
+    Level  Files Size(MB) Time(sec) Read(MB) Write(MB)
+    --------------------------------------------------
+      0        1      100         1        0       100
+
+
+## Difference between a C++ extension and a pure-Ruby (ffi) impl?
+
+This, again, is only for general reference, but I wanted to compare the `c++`
+extension of [leveldb-ruby](https://github.com/wmorgan/leveldb-ruby) with this
+gem, which uses ffi.
+
+I'm aware that that lib is 1 year older, but for those who care, the basic bench:
+
+              user     system      total        real
+    put   0.440000   0.300000   0.740000 (  1.363188)
+    get   0.440000   0.440000   1.460000 (  2.407274)
+
 ## Todo
 
 1. Add pluggable serializers
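For orientation, here is a minimal sketch of a benchmark in the same shape as the one described above. It is not the gem's `benchmark/leveldb.rb`; it only assumes the `LevelDB::DB.new`, `[]=`/`[]`, and `stats` calls that appear in `data/lib/leveldb/db.rb` and the README:

```ruby
require 'benchmark'
require 'leveldb'

# Hypothetical re-creation of the README benchmark: write and read back
# ~100mb of incompressible data in 10kb values.
db    = LevelDB::DB.new('/tmp/leveldb_bench', compression: false)
value = Random.new.bytes(10 * 1024)           # 10kb of random bytes
count = (100 * 1024 * 1024) / value.bytesize  # ~100mb total

Benchmark.bm(4) do |x|
  x.report('put') { count.times { |i| db["key#{i}"] = value } }
  x.report('get') { count.times { |i| db["key#{i}"] } }
end

puts db.stats  # prints the per-level Files/Size table shown above
```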
data/ext/leveldb/Makefile
CHANGED
@@ -31,6 +31,7 @@ TESTHARNESS = ./util/testharness.o $(TESTUTIL)
 
 TESTS = \
 	arena_test \
+	autocompact_test \
 	bloom_test \
 	c_test \
 	cache_test \
@@ -70,7 +71,7 @@ SHARED = $(SHARED1)
 else
 # Update db.h if you change these.
 SHARED_MAJOR = 1
-SHARED_MINOR =
+SHARED_MINOR = 13
 SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
 SHARED2 = $(SHARED1).$(SHARED_MAJOR)
 SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
@@ -114,6 +115,9 @@ leveldbutil: db/leveldb_main.o $(LIBOBJECTS)
 arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS)
 	$(CXX) $(LDFLAGS) util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
 
+autocompact_test: db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS)
+	$(CXX) $(LDFLAGS) db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
 bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
 	$(CXX) $(LDFLAGS) util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
 
data/ext/leveldb/db/autocompact_test.cc
ADDED
@@ -0,0 +1,118 @@
+// Copyright (c) 2013 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "leveldb/db.h"
+#include "db/db_impl.h"
+#include "leveldb/cache.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+class AutoCompactTest {
+ public:
+  std::string dbname_;
+  Cache* tiny_cache_;
+  Options options_;
+  DB* db_;
+
+  AutoCompactTest() {
+    dbname_ = test::TmpDir() + "/autocompact_test";
+    tiny_cache_ = NewLRUCache(100);
+    options_.block_cache = tiny_cache_;
+    DestroyDB(dbname_, options_);
+    options_.create_if_missing = true;
+    options_.compression = kNoCompression;
+    ASSERT_OK(DB::Open(options_, dbname_, &db_));
+  }
+
+  ~AutoCompactTest() {
+    delete db_;
+    DestroyDB(dbname_, Options());
+    delete tiny_cache_;
+  }
+
+  std::string Key(int i) {
+    char buf[100];
+    snprintf(buf, sizeof(buf), "key%06d", i);
+    return std::string(buf);
+  }
+
+  uint64_t Size(const Slice& start, const Slice& limit) {
+    Range r(start, limit);
+    uint64_t size;
+    db_->GetApproximateSizes(&r, 1, &size);
+    return size;
+  }
+
+  void DoReads(int n);
+};
+
+static const int kValueSize = 200 * 1024;
+static const int kTotalSize = 100 * 1024 * 1024;
+static const int kCount = kTotalSize / kValueSize;
+
+// Read through the first n keys repeatedly and check that they get
+// compacted (verified by checking the size of the key space).
+void AutoCompactTest::DoReads(int n) {
+  std::string value(kValueSize, 'x');
+  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+
+  // Fill database
+  for (int i = 0; i < kCount; i++) {
+    ASSERT_OK(db_->Put(WriteOptions(), Key(i), value));
+  }
+  ASSERT_OK(dbi->TEST_CompactMemTable());
+
+  // Delete everything
+  for (int i = 0; i < kCount; i++) {
+    ASSERT_OK(db_->Delete(WriteOptions(), Key(i)));
+  }
+  ASSERT_OK(dbi->TEST_CompactMemTable());
+
+  // Get initial measurement of the space we will be reading.
+  const int64_t initial_size = Size(Key(0), Key(n));
+  const int64_t initial_other_size = Size(Key(n), Key(kCount));
+
+  // Read until size drops significantly.
+  std::string limit_key = Key(n);
+  for (int read = 0; true; read++) {
+    ASSERT_LT(read, 100) << "Taking too long to compact";
+    Iterator* iter = db_->NewIterator(ReadOptions());
+    for (iter->SeekToFirst();
+         iter->Valid() && iter->key().ToString() < limit_key;
+         iter->Next()) {
+      // Drop data
+    }
+    delete iter;
+    // Wait a little bit to allow any triggered compactions to complete.
+    Env::Default()->SleepForMicroseconds(1000000);
+    uint64_t size = Size(Key(0), Key(n));
+    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
+            read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
+    if (size <= initial_size/10) {
+      break;
+    }
+  }
+
+  // Verify that the size of the key space not touched by the reads
+  // is pretty much unchanged.
+  const int64_t final_other_size = Size(Key(n), Key(kCount));
+  ASSERT_LE(final_other_size, initial_other_size + 1048576);
+  ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+}
+
+TEST(AutoCompactTest, ReadAll) {
+  DoReads(kCount);
+}
+
+TEST(AutoCompactTest, ReadHalf) {
+  DoReads(kCount/2);
+}
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) {
+  return leveldb::test::RunAllTests();
+}
data/ext/leveldb/db/corruption_test.cc
CHANGED
@@ -35,6 +35,7 @@ class CorruptionTest {
   CorruptionTest() {
     tiny_cache_ = NewLRUCache(100);
     options_.env = &env_;
+    options_.block_cache = tiny_cache_;
     dbname_ = test::TmpDir() + "/db_test";
     DestroyDB(dbname_, options_);
@@ -50,17 +51,14 @@ class CorruptionTest {
     delete tiny_cache_;
   }
 
-  Status TryReopen(
+  Status TryReopen() {
     delete db_;
     db_ = NULL;
-
-    opt.env = &env_;
-    opt.block_cache = tiny_cache_;
-    return DB::Open(opt, dbname_, &db_);
+    return DB::Open(options_, dbname_, &db_);
   }
 
-  void Reopen(
-    ASSERT_OK(TryReopen(
+  void Reopen() {
+    ASSERT_OK(TryReopen());
   }
 
   void RepairDB() {
@@ -92,6 +90,10 @@ class CorruptionTest {
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       uint64_t key;
      Slice in(iter->key());
+      if (in == "" || in == "~") {
+        // Ignore boundary keys.
+        continue;
+      }
       if (!ConsumeDecimalNumber(&in, &key) ||
           !in.empty() ||
           key < next_expected) {
@@ -233,7 +235,7 @@ TEST(CorruptionTest, TableFile) {
   dbi->TEST_CompactRange(1, NULL, NULL);
 
   Corrupt(kTableFile, 100, 1);
-  Check(
+  Check(90, 99);
 }
 
 TEST(CorruptionTest, TableFileIndexData) {
@@ -299,7 +301,7 @@ TEST(CorruptionTest, CompactionInputError) {
   ASSERT_EQ(1, Property("leveldb.num-files-at-level" + NumberToString(last)));
 
   Corrupt(kTableFile, 100, 1);
-  Check(
+  Check(5, 9);
 
   // Force compactions by writing lots of values
   Build(10000);
@@ -307,32 +309,23 @@ TEST(CorruptionTest, CompactionInputError) {
 }
 
 TEST(CorruptionTest, CompactionInputErrorParanoid) {
-
-
-
-  Reopen(&options);
+  options_.paranoid_checks = true;
+  options_.write_buffer_size = 512 << 10;
+  Reopen();
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
 
-  //
-  for (int
-
-  dbi->Put(WriteOptions(), "~", "end");
+  // Make multiple inputs so we need to compact.
+  for (int i = 0; i < 2; i++) {
+    Build(10);
     dbi->TEST_CompactMemTable();
+    Corrupt(kTableFile, 100, 1);
+    env_.SleepForMicroseconds(100000);
   }
+  dbi->CompactRange(NULL, NULL);
 
-
-  dbi->TEST_CompactMemTable();
-  ASSERT_EQ(1, Property("leveldb.num-files-at-level0"));
-
-  Corrupt(kTableFile, 100, 1);
-  Check(9, 9);
-
-  // Write must eventually fail because of corrupted table
-  Status s;
+  // Write must fail because of corrupted table
   std::string tmp1, tmp2;
-
-  s = db_->Put(WriteOptions(), Key(i, &tmp1), Value(i, &tmp2));
-  }
+  Status s = db_->Put(WriteOptions(), Key(5, &tmp1), Value(5, &tmp2));
   ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
 }
 
data/ext/leveldb/db/db_impl.cc
CHANGED
@@ -113,14 +113,14 @@ Options SanitizeOptions(const std::string& dbname,
   return result;
 }
 
-DBImpl::DBImpl(const Options&
-    : env_(
-      internal_comparator_(
-      internal_filter_policy_(
-      options_(SanitizeOptions(
-
-      owns_info_log_(options_.info_log !=
-      owns_cache_(options_.block_cache !=
+DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
+    : env_(raw_options.env),
+      internal_comparator_(raw_options.comparator),
+      internal_filter_policy_(raw_options.filter_policy),
+      options_(SanitizeOptions(dbname, &internal_comparator_,
+                               &internal_filter_policy_, raw_options)),
+      owns_info_log_(options_.info_log != raw_options.info_log),
+      owns_cache_(options_.block_cache != raw_options.block_cache),
       dbname_(dbname),
       db_lock_(NULL),
       shutting_down_(NULL),
@@ -130,6 +130,7 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
       logfile_(NULL),
       logfile_number_(0),
       log_(NULL),
+      seed_(0),
       tmp_batch_(new WriteBatch),
       bg_compaction_scheduled_(false),
       manual_compaction_(NULL),
@@ -138,7 +139,7 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
   has_imm_.Release_Store(NULL);
 
   // Reserve ten files or so for other uses and give the rest to TableCache.
-  const int table_cache_size =
+  const int table_cache_size = options_.max_open_files - kNumNonTableCacheFiles;
   table_cache_ = new TableCache(dbname_, &options_, table_cache_size);
 
   versions_ = new VersionSet(dbname_, &options_, table_cache_,
@@ -1027,7 +1028,8 @@ static void CleanupIteratorState(void* arg1, void* arg2) {
 }  // namespace
 
 Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
-                                      SequenceNumber* latest_snapshot
+                                      SequenceNumber* latest_snapshot,
+                                      uint32_t* seed) {
   IterState* cleanup = new IterState;
   mutex_.Lock();
   *latest_snapshot = versions_->LastSequence();
@@ -1051,13 +1053,15 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
   cleanup->version = versions_->current();
   internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL);
 
+  *seed = ++seed_;
   mutex_.Unlock();
   return internal_iter;
 }
 
 Iterator* DBImpl::TEST_NewInternalIterator() {
   SequenceNumber ignored;
-
+  uint32_t ignored_seed;
+  return NewInternalIterator(ReadOptions(), &ignored, &ignored_seed);
 }
 
 int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
@@ -1114,12 +1118,21 @@ Status DBImpl::Get(const ReadOptions& options,
 
 Iterator* DBImpl::NewIterator(const ReadOptions& options) {
   SequenceNumber latest_snapshot;
-
+  uint32_t seed;
+  Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
   return NewDBIterator(
-
+      this, user_comparator(), iter,
       (options.snapshot != NULL
        ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
-       : latest_snapshot)
+       : latest_snapshot),
+      seed);
+}
+
+void DBImpl::RecordReadSample(Slice key) {
+  MutexLock l(&mutex_);
+  if (versions_->current()->RecordReadSample(key)) {
+    MaybeScheduleCompaction();
+  }
 }
 
 const Snapshot* DBImpl::GetSnapshot() {
data/ext/leveldb/db/db_impl.h
CHANGED
@@ -59,13 +59,19 @@ class DBImpl : public DB {
   // file at a level >= 1.
   int64_t TEST_MaxNextLevelOverlappingBytes();
 
+  // Record a sample of bytes read at the specified internal key.
+  // Samples are taken approximately once every config::kReadBytesPeriod
+  // bytes.
+  void RecordReadSample(Slice key);
+
 private:
   friend class DB;
   struct CompactionState;
   struct Writer;
 
   Iterator* NewInternalIterator(const ReadOptions&,
-                                SequenceNumber* latest_snapshot
+                                SequenceNumber* latest_snapshot,
+                                uint32_t* seed);
 
   Status NewDB();
 
@@ -135,6 +141,7 @@ class DBImpl : public DB {
   WritableFile* logfile_;
   uint64_t logfile_number_;
   log::Writer* log_;
+  uint32_t seed_;                // For sampling.
 
   // Queue of writers.
   std::deque<Writer*> writers_;
data/ext/leveldb/db/db_iter.cc
CHANGED
@@ -5,12 +5,14 @@
 #include "db/db_iter.h"
 
 #include "db/filename.h"
+#include "db/db_impl.h"
 #include "db/dbformat.h"
 #include "leveldb/env.h"
 #include "leveldb/iterator.h"
 #include "port/port.h"
 #include "util/logging.h"
 #include "util/mutexlock.h"
+#include "util/random.h"
 
 namespace leveldb {
 
@@ -46,15 +48,16 @@ class DBIter: public Iterator {
     kReverse
   };
 
-  DBIter(const
-
-      :
-        env_(env),
+  DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
+         uint32_t seed)
+      : db_(db),
         user_comparator_(cmp),
         iter_(iter),
         sequence_(s),
         direction_(kForward),
-        valid_(false)
+        valid_(false),
+        rnd_(seed),
+        bytes_counter_(RandomPeriod()) {
   }
   virtual ~DBIter() {
     delete iter_;
@@ -100,8 +103,12 @@ class DBIter: public Iterator {
     }
   }
 
-
-
+  // Pick next gap with average value of config::kReadBytesPeriod.
+  ssize_t RandomPeriod() {
+    return rnd_.Uniform(2*config::kReadBytesPeriod);
+  }
+
+  DBImpl* db_;
   const Comparator* const user_comparator_;
   Iterator* const iter_;
   SequenceNumber const sequence_;
@@ -112,13 +119,23 @@ class DBIter: public Iterator {
   Direction direction_;
   bool valid_;
 
+  Random rnd_;
+  ssize_t bytes_counter_;
+
   // No copying allowed
   DBIter(const DBIter&);
   void operator=(const DBIter&);
 };
 
 inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
-
+  Slice k = iter_->key();
+  ssize_t n = k.size() + iter_->value().size();
+  bytes_counter_ -= n;
+  while (bytes_counter_ < 0) {
+    bytes_counter_ += RandomPeriod();
+    db_->RecordReadSample(k);
+  }
+  if (!ParseInternalKey(k, ikey)) {
    status_ = Status::Corruption("corrupted internal key in DBIter");
     return false;
   } else {
@@ -288,12 +305,12 @@ void DBIter::SeekToLast() {
 }  // anonymous namespace
 
 Iterator* NewDBIterator(
-
-    Env* env,
+    DBImpl* db,
     const Comparator* user_key_comparator,
     Iterator* internal_iter,
-
-
+    SequenceNumber sequence,
+    uint32_t seed) {
+  return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
 }
 
 }  // namespace leveldb
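The `rnd_`/`bytes_counter_` pair above implements read sampling: every key/value the iterator consumes shrinks a byte budget, and each time the budget runs out a sample is reported and a new gap is drawn with mean `config::kReadBytesPeriod`. A toy Ruby model of just that counter logic (the entry sizes are made up; this is not the gem's API):

```ruby
READ_BYTES_PERIOD = 1_048_576  # config::kReadBytesPeriod

rng           = Random.new(42)                   # per-iterator seed
bytes_counter = rng.rand(2 * READ_BYTES_PERIOD)  # uniform on [0, 2p) => mean p
samples       = 0

1_000_000.times do
  bytes_counter -= 10 + 1_000                    # key size + value size
  while bytes_counter < 0
    bytes_counter += rng.rand(2 * READ_BYTES_PERIOD)
    samples += 1                                 # stands in for RecordReadSample
  end
end

puts "#{samples} samples over ~#{1_000_000 * 1_010 / 1_048_576} MB scanned"
# => roughly one sample per MB read, matching kReadBytesPeriod
```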
data/ext/leveldb/db/db_iter.h
CHANGED
@@ -11,15 +11,17 @@
 
 namespace leveldb {
 
+class DBImpl;
+
 // Return a new iterator that converts internal keys (yielded by
 // "*internal_iter") that were live at the specified "sequence" number
 // into appropriate user keys.
 extern Iterator* NewDBIterator(
-
-    Env* env,
+    DBImpl* db,
     const Comparator* user_key_comparator,
     Iterator* internal_iter,
-
+    SequenceNumber sequence,
+    uint32_t seed);
 
 }  // namespace leveldb
 
data/ext/leveldb/db/dbformat.h
CHANGED
@@ -38,6 +38,9 @@ static const int kL0_StopWritesTrigger = 12;
 // space if the same key space is being repeatedly overwritten.
 static const int kMaxMemCompactLevel = 2;
 
+// Approximate gap in bytes between samples of data read during iteration.
+static const int kReadBytesPeriod = 1048576;
+
 }  // namespace config
 
 class InternalKey;
data/ext/leveldb/db/version_set.cc
CHANGED
@@ -289,6 +289,51 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
   return a->number > b->number;
 }
 
+void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
+                                 void* arg,
+                                 bool (*func)(void*, int, FileMetaData*)) {
+  // TODO(sanjay): Change Version::Get() to use this function.
+  const Comparator* ucmp = vset_->icmp_.user_comparator();
+
+  // Search level-0 in order from newest to oldest.
+  std::vector<FileMetaData*> tmp;
+  tmp.reserve(files_[0].size());
+  for (uint32_t i = 0; i < files_[0].size(); i++) {
+    FileMetaData* f = files_[0][i];
+    if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
+        ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
+      tmp.push_back(f);
+    }
+  }
+  if (!tmp.empty()) {
+    std::sort(tmp.begin(), tmp.end(), NewestFirst);
+    for (uint32_t i = 0; i < tmp.size(); i++) {
+      if (!(*func)(arg, 0, tmp[i])) {
+        return;
+      }
+    }
+  }
+
+  // Search other levels.
+  for (int level = 1; level < config::kNumLevels; level++) {
+    size_t num_files = files_[level].size();
+    if (num_files == 0) continue;
+
+    // Binary search to find earliest index whose largest key >= internal_key.
+    uint32_t index = FindFile(vset_->icmp_, files_[level], internal_key);
+    if (index < num_files) {
+      FileMetaData* f = files_[level][index];
+      if (ucmp->Compare(user_key, f->smallest.user_key()) < 0) {
+        // All of "f" is past any data for user_key
+      } else {
+        if (!(*func)(arg, level, f)) {
+          return;
+        }
+      }
+    }
+  }
+}
+
 Status Version::Get(const ReadOptions& options,
                     const LookupKey& k,
                     std::string* value,
@@ -401,6 +446,44 @@ bool Version::UpdateStats(const GetStats& stats) {
   return false;
 }
 
+bool Version::RecordReadSample(Slice internal_key) {
+  ParsedInternalKey ikey;
+  if (!ParseInternalKey(internal_key, &ikey)) {
+    return false;
+  }
+
+  struct State {
+    GetStats stats;  // Holds first matching file
+    int matches;
+
+    static bool Match(void* arg, int level, FileMetaData* f) {
+      State* state = reinterpret_cast<State*>(arg);
+      state->matches++;
+      if (state->matches == 1) {
+        // Remember first match.
+        state->stats.seek_file = f;
+        state->stats.seek_file_level = level;
+      }
+      // We can stop iterating once we have a second match.
+      return state->matches < 2;
+    }
+  };
+
+  State state;
+  state.matches = 0;
+  ForEachOverlapping(ikey.user_key, internal_key, &state, &State::Match);
+
+  // Must have at least two matches since we want to merge across
+  // files. But what if we have a single file that contains many
+  // overwrites and deletions?  Should we have another mechanism for
+  // finding such files?
+  if (state.matches >= 2) {
+    // 1MB cost is about 1 seek (see comment in Builder::Apply).
+    return UpdateStats(state.stats);
+  }
+  return false;
+}
+
 void Version::Ref() {
   ++refs_;
 }
@@ -435,10 +518,13 @@ int Version::PickLevelForMemTableOutput(
     if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
       break;
     }
-
-
-
-
+    if (level + 2 < config::kNumLevels) {
+      // Check that file does not overlap too many grandparent bytes.
+      GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
+      const int64_t sum = TotalFileSize(overlaps);
+      if (sum > kMaxGrandParentOverlapBytes) {
+        break;
+      }
     }
     level++;
   }
@@ -452,6 +538,8 @@ void Version::GetOverlappingInputs(
     const InternalKey* begin,
     const InternalKey* end,
     std::vector<FileMetaData*>* inputs) {
+  assert(level >= 0);
+  assert(level < config::kNumLevels);
  inputs->clear();
   Slice user_begin, user_end;
   if (begin != NULL) {
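`RecordReadSample` only charges a seek when the sampled key overlaps at least two files, because that is the case a compaction can actually improve: merging the files removes the extra lookups. A toy model of the decision, with file metadata reduced to user-key ranges and an `allowed_seeks` budget standing in for `GetStats`/`UpdateStats` (illustrative only, not the gem's API):

```ruby
FileMeta = Struct.new(:smallest, :largest, :allowed_seeks)

# levels[0] may contain overlapping files; deeper levels are disjoint.
def record_read_sample(levels, user_key)
  matches = []
  levels.each do |files|
    files.each do |f|
      matches << f if user_key.between?(f.smallest, f.largest)
      break if matches.size >= 2   # two overlaps are enough to decide
    end
    break if matches.size >= 2
  end
  return false if matches.size < 2
  first = matches.first            # charge the first (newest) matching file
  first.allowed_seeks -= 1
  first.allowed_seeks <= 0         # true => caller should schedule compaction
end

levels = [
  [FileMeta.new('a', 'm', 1)],     # level-0 file covering 'c'
  [FileMeta.new('c', 'z', 100)],   # level-1 file also covering 'c'
]
p record_read_sample(levels, 'c')  # => true, a compaction is suggested
```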
data/ext/leveldb/db/version_set.h
CHANGED
@@ -78,6 +78,12 @@ class Version {
   // REQUIRES: lock is held
   bool UpdateStats(const GetStats& stats);
 
+  // Record a sample of bytes read at the specified internal key.
+  // Samples are taken approximately once every config::kReadBytesPeriod
+  // bytes.  Returns true if a new compaction may need to be triggered.
+  // REQUIRES: lock is held
+  bool RecordReadSample(Slice key);
+
   // Reference count management (so Versions do not disappear out from
   // under live iterators)
   void Ref();
@@ -114,6 +120,15 @@ class Version {
   class LevelFileNumIterator;
   Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;
 
+  // Call func(arg, level, f) for every file that overlaps user_key in
+  // order from newest to oldest.  If an invocation of func returns
+  // false, makes no more calls.
+  //
+  // REQUIRES: user portion of internal_key == user_key.
+  void ForEachOverlapping(Slice user_key, Slice internal_key,
+                          void* arg,
+                          bool (*func)(void*, int, FileMetaData*));
+
   VersionSet* vset_;            // VersionSet to which this Version belongs
   Version* next_;               // Next version in linked list
   Version* prev_;               // Previous version in linked list
data/ext/leveldb/util/env_posix.cc
CHANGED
@@ -319,8 +319,39 @@ class PosixMmapFile : public WritableFile {
     return Status::OK();
   }
 
-  virtual Status Sync() {
+  Status SyncDirIfManifest() {
+    const char* f = filename_.c_str();
+    const char* sep = strrchr(f, '/');
+    Slice basename;
+    std::string dir;
+    if (sep == NULL) {
+      dir = ".";
+      basename = f;
+    } else {
+      dir = std::string(f, sep - f);
+      basename = sep + 1;
+    }
     Status s;
+    if (basename.starts_with("MANIFEST")) {
+      int fd = open(dir.c_str(), O_RDONLY);
+      if (fd < 0) {
+        s = IOError(dir, errno);
+      } else {
+        if (fsync(fd) < 0) {
+          s = IOError(dir, errno);
+        }
+        close(fd);
+      }
+    }
+    return s;
+  }
+
+  virtual Status Sync() {
+    // Ensure new files referred to by the manifest are in the filesystem.
+    Status s = SyncDirIfManifest();
+    if (!s.ok()) {
+      return s;
+    }
 
     if (pending_sync_) {
       // Some unmapped data was not synced
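`SyncDirIfManifest` closes a crash-recovery hole: after a new MANIFEST is written and synced, the *directory entry* naming it must also be durable, which on POSIX means fsyncing the containing directory. The same trick sketched in Ruby, purely for illustration (POSIX-only; not part of the gem):

```ruby
# Fsync the directory containing `path` when `path` is a MANIFEST file,
# so the file's directory entry survives a crash.
def sync_dir_if_manifest(path)
  return unless File.basename(path).start_with?('MANIFEST')
  fd = IO.sysopen(File.dirname(path), File::RDONLY)  # open(dir, O_RDONLY)
  io = IO.for_fd(fd)
  io.fsync                                           # fsync(fd)
ensure
  io.close if io
end
```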
data/ext/leveldb/util/random.h
CHANGED
@@ -16,7 +16,12 @@ class Random {
 private:
  uint32_t seed_;
 public:
-  explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { }
+  explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
+    // Avoid bad seeds.
+    if (seed_ == 0 || seed_ == 2147483647L) {
+      seed_ = 1;
+    }
+  }
   uint32_t Next() {
     static const uint32_t M = 2147483647L;   // 2^31-1
     static const uint64_t A = 16807;  // bits 14, 8, 7, 5, 2, 1, 0
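The guard matters because `Random` is a Park-Miller (Lehmer) generator, `seed = seed * 16807 % (2^31 - 1)`: a seed of 0 is a fixed point, and 2147483647 is congruent to 0 mod M, so either value would collapse the new per-iterator sampling streams to all zeros. A quick check in Ruby:

```ruby
M = 2**31 - 1  # 2147483647
A = 16807

step = ->(s) { (s * A) % M }

p step.call(0)  # => 0      (stuck forever)
p step.call(M)  # => 0      (M is 0 mod M, so also stuck at 0)
p step.call(1)  # => 16807  (a healthy sequence)
```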
data/lib/leveldb/db.rb
CHANGED
@@ -11,7 +11,7 @@ module LevelDB
   class KeyError < StandardError; end
   class ClosedError < StandardError; end
 
-  attr_reader :path
+  attr_reader :path, :options
   @@mutex = Mutex.new
 
   DEFAULT = {
@@ -20,7 +20,7 @@ module LevelDB
     paranoid_checks: false,
     write_buffer_size: 4 << 20,
     block_size: 4096,
-    max_open_files:
+    max_open_files: 200,
     block_cache_size: 8 * (2 << 20),
     block_restart_interval: 16,
     compression: false,
@@ -29,27 +29,35 @@ module LevelDB
   }
 
   def initialize(path, options={})
+    new!(path, options)
+  end
+
+  def new!(path, options={})
     @_db_opts = C.options_create
     @_write_opts = C.writeoptions_create
     @_read_opts = C.readoptions_create
     @_read_len = C.value('size_t')
 
-    options = DEFAULT.merge(options)
+    @options = DEFAULT.merge(options)
 
-    @_cache = C.cache_create_lru(options[:block_cache_size])
+    @_cache = C.cache_create_lru(@options[:block_cache_size])
 
-    C.readoptions_set_verify_checksums(@_read_opts, options[:verify_checksums] ? 1 : 0)
-    C.readoptions_set_fill_cache(@_read_opts, options[:fill_cache] ? 1 : 0)
+    C.readoptions_set_verify_checksums(@_read_opts, @options[:verify_checksums] ? 1 : 0)
+    C.readoptions_set_fill_cache(@_read_opts, @options[:fill_cache] ? 1 : 0)
 
-    C.options_set_create_if_missing(@_db_opts, options[:create_if_missing] ? 1 : 0)
-    C.options_set_error_if_exists(@_db_opts, options[:error_if_exists] ? 1 : 0)
-    C.options_set_paranoid_checks(@_db_opts, options[:paranoid_checks] ? 1 : 0)
-    C.options_set_write_buffer_size(@_db_opts, options[:write_buffer_size])
-    C.options_set_block_size(@_db_opts, options[:block_size])
+    C.options_set_create_if_missing(@_db_opts, @options[:create_if_missing] ? 1 : 0)
+    C.options_set_error_if_exists(@_db_opts, @options[:error_if_exists] ? 1 : 0)
+    C.options_set_paranoid_checks(@_db_opts, @options[:paranoid_checks] ? 1 : 0)
+    C.options_set_write_buffer_size(@_db_opts, @options[:write_buffer_size])
+    C.options_set_block_size(@_db_opts, @options[:block_size])
     C.options_set_cache(@_db_opts, @_cache)
-    C.options_set_max_open_files(@_db_opts, options[:max_open_files])
-    C.options_set_block_restart_interval(@_db_opts, options[:block_restart_interval])
-    C.options_set_compression(@_db_opts, options[:compression] ? 1 : 0)
+    C.options_set_max_open_files(@_db_opts, @options[:max_open_files])
+    C.options_set_block_restart_interval(@_db_opts, @options[:block_restart_interval])
+    C.options_set_compression(@_db_opts, @options[:compression] ? 1 : 0)
+
+    if @options[:bloom_filter_bits_per_key]
+      C.options_set_filter_policy(@_db_opts, C.filterpolicy_create_bloom(@options[:bloom_filter_bits_per_key]))
+    end
 
     @_db_opts.free = @_write_opts.free = @_read_opts.free = C[:options_destroy]
 
@@ -63,6 +71,14 @@ module LevelDB
 
     raise Error, error_message if errors?
   end
+  private :new!
+
+  def reopen
+    close unless closed?
+    @@mutex.synchronize { @_closed = false }
+    new!(@path, @options)
+  end
+  alias reopen! reopen
 
   def []=(key, val)
     raise ClosedError if closed?
@@ -81,12 +97,13 @@ module LevelDB
   def [](key)
     raise ClosedError if closed?
 
-    key
-    val
+    key = key.to_s
+    val = C.get(@_db, @_read_opts, key, key.size, @_read_len, @_err)
+    val.free = C[:free]
 
     raise Error, error_message if errors?
 
-    @_read_len.value == 0 ? nil : val.to_s(@_read_len.value)
+    @_read_len.value == 0 ? nil : val.to_s(@_read_len.value).clone
   end
   alias get []
 
@@ -189,6 +206,11 @@ module LevelDB
     true
   end
 
+  def destroy!
+    close && destroy && reopen
+  end
+  alias clear! destroy!
+
   def read_property(name)
     raise ClosedError if closed?
 
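Putting the Ruby-side changes together, a sketch of how the new surface might be used (assuming `LevelDB::DB` is the class defined in `db.rb`; `bloom_filter_bits_per_key` is the option wired to `filterpolicy_create_bloom` above, and 10 bits per key is the trade-off usually quoted for LevelDB bloom filters):

```ruby
require 'leveldb'

db = LevelDB::DB.new('/tmp/mydb', bloom_filter_bits_per_key: 10)

db['foo'] = 'bar'
db.close
db.reopen     # new in 0.1.4: rebuilds the handle from @path/@options
db['foo']     # => "bar"

db.destroy!   # close + destroy + reopen, i.e. wipe the database
db['foo']     # => nil
db.options    # now readable thanks to attr_reader :path, :options
```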
data/lib/leveldb/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: leveldb
 version: !ruby/object:Gem::Version
-  version: 0.1.3
+  version: 0.1.4
 platform: ruby
 authors:
 - DAddYE
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2013-
+date: 2013-09-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fiddler-rb
@@ -90,6 +90,7 @@ extra_rdoc_files: []
 files:
 - ext/Rakefile
 - ext/leveldb/db/c_test.c
+- ext/leveldb/db/autocompact_test.cc
 - ext/leveldb/db/builder.cc
 - ext/leveldb/db/c.cc
 - ext/leveldb/db/corruption_test.cc
@@ -244,7 +245,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.1.2
 signing_key:
 specification_version: 4
 summary: LevelDB for Ruby