leveldb-native 0.2 → 0.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 825dfca4d9cdc2c1fee7e1a2c5bb75ace2c8a950
-   data.tar.gz: 790ec80932d2f102a25497ee9498d7ff48a1a92c
+   metadata.gz: 87b336d5ad009cf92a483c22c12daf7a5ddbe709
+   data.tar.gz: b89a5864acb35075e2c97ae5da9d509093ff90da
  SHA512:
-   metadata.gz: 78b3240db4455f492f27d299c22ebc32a2b29626d9b9de874b81e8ddd90582b21d91aec2c13d331faefa62cd73055e12faf4f8595d4c0b47f0a8112c957d6013
-   data.tar.gz: e316ae49c4f3f55a0ee720e042235b4698d53457761e15961adbd75b65ea325d108b57a92f1e70b1eb466e7cb706d22dcafe03b88f36f1dcd287d5f241993cac
+   metadata.gz: 3a22b86e3e2ac2291bde9bb3685bcd859dff59661bf879f13f20a0b79b21b8a4f9a750c023fa46a55bd58a82b8be8ac257425548120849435719cb5bde9918b9
+   data.tar.gz: 5529fc3e0d84002e96389ec091fc5c025c9be88e16fa9d64593dccc4c3d88b44c9304844fce31f95be9dc439f02836a21d7de087f72d536234b88fe9c244b974
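
The new checksums can be spot-checked against a locally downloaded copy of the gem: a .gem file is a plain tar archive, and checksums.yaml digests its metadata.gz and data.tar.gz entries. The sketch below is illustrative only; the local filename leveldb-native-0.3.gem is an assumption (for example, fetched beforehand with "gem fetch leveldb-native -v 0.3").

    # Illustrative check of the checksums above against a local copy of the gem.
    # Assumption: "leveldb-native-0.3.gem" is present in the current directory.
    require "rubygems/package"
    require "digest"

    digests = {}
    File.open("leveldb-native-0.3.gem", "rb") do |io|
      tar = Gem::Package::TarReader.new(io)
      tar.each do |entry|
        next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
        data = entry.read
        digests[entry.full_name] = {
          "SHA1"   => Digest::SHA1.hexdigest(data),
          "SHA512" => Digest::SHA512.hexdigest(data)
        }
      end
    end

    puts digests.inspect  # compare with the + lines in checksums.yaml above
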
ext/leveldb-native/leveldb-native.cc ADDED
@@ -0,0 +1,839 @@
+ #include <ruby.h>
+ #include <memory>
+
+ #include <leveldb/db.h>
+ #include <leveldb/cache.h>
+ #include <leveldb/write_batch.h>
+
+ using namespace std;
+
+ static VALUE m_leveldb;
+ static VALUE c_db;
+ static VALUE c_iter;
+ static VALUE c_batch;
+ static VALUE c_error;
+ static VALUE c_no_compression;
+ static VALUE c_snappy_compression;
+ static VALUE k_fill;
+ static VALUE k_verify;
+ static VALUE k_snapshot;
+ static VALUE k_sync;
+ static VALUE k_from;
+ static VALUE k_to;
+ static VALUE k_reversed;
+ static VALUE k_class;
+ static VALUE k_name;
+ static ID k_to_s;
+ static leveldb::ReadOptions uncached_read_options;
+
+ static VALUE c_db_options;
+ static VALUE c_snapshot;
+ static VALUE k_create_if_missing;
+ static VALUE k_error_if_exists;
+ static VALUE k_paranoid_checks;
+ static VALUE k_write_buffer_size;
+ static VALUE k_block_cache_size;
+ static VALUE k_block_size;
+ static VALUE k_block_restart_interval;
+ static VALUE k_compression;
+ static VALUE k_max_open_files;
+
+ // support 1.9 and 1.8
+ #ifndef RSTRING_PTR
+ #define RSTRING_PTR(v) RSTRING(v)->ptr
+ #endif
+
+ // convert status errors into exceptions
+ #define RAISE_ON_ERROR(status) do { \
+   if(!status.ok()) { \
+     VALUE exc = rb_exc_new2(c_error, status.ToString().c_str()); \
+     rb_exc_raise(exc); \
+   } \
+ } while(0)
+
+ typedef struct bound_db {
+   leveldb::DB* db;
+ } bound_db;
+
+ typedef struct bound_snapshot {
+   const leveldb::Snapshot* snapshot;
+   VALUE v_db;
+ } bound_snapshot;
+
+ static void db_free(bound_db* db) {
+   if(db->db != NULL) {
+     delete db->db;
+     db->db = NULL;
+   }
+   delete db;
+ }
+
+ static void sync_vals(VALUE opts, VALUE key, VALUE db_options, bool* pOptionVal) {
+   VALUE v = rb_hash_aref(opts, key);
+
+   if(!NIL_P(v)) *pOptionVal = RTEST(v);
+   string param("@");
+   param += rb_id2name(SYM2ID(key));
+   rb_iv_set(db_options, param.c_str(), *pOptionVal ? Qtrue : Qfalse);
+ }
+
+ static void sync_vals(VALUE opts, VALUE key, VALUE db_options, size_t* pOptionVal) {
+   VALUE v = rb_hash_aref(opts, key);
+
+   if(!NIL_P(v)) *pOptionVal = NUM2UINT(v);
+   string param("@");
+   param += rb_id2name(SYM2ID(key));
+   rb_iv_set(db_options, param.c_str(), UINT2NUM(*pOptionVal));
+ }
+
+ static void sync_vals(VALUE opts, VALUE key, VALUE db_options, int* pOptionVal) {
+   VALUE v = rb_hash_aref(opts, key);
+
+   if(!NIL_P(v)) *pOptionVal = NUM2INT(v);
+   string param("@");
+   param += rb_id2name(SYM2ID(key));
+   rb_iv_set(db_options, param.c_str(), INT2NUM(*pOptionVal));
+ }
+
+ static void set_db_option(VALUE o_options, VALUE opts, leveldb::Options* options) {
+   if(NIL_P(o_options)) return;
+   Check_Type(opts, T_HASH);
+
+   sync_vals(opts, k_create_if_missing, o_options, &(options->create_if_missing));
+   sync_vals(opts, k_error_if_exists, o_options, &(options->error_if_exists));
+   sync_vals(opts, k_paranoid_checks, o_options, &(options->paranoid_checks));
+   sync_vals(opts, k_write_buffer_size, o_options, &(options->write_buffer_size));
+   sync_vals(opts, k_max_open_files, o_options, &(options->max_open_files));
+   sync_vals(opts, k_block_size, o_options, &(options->block_size));
+   sync_vals(opts, k_block_restart_interval, o_options, &(options->block_restart_interval));
+
+   VALUE v = rb_hash_aref(opts, k_block_cache_size);
+   if(!NIL_P(v)) {
+     options->block_cache = leveldb::NewLRUCache(NUM2INT(v));
+     rb_iv_set(o_options, "@block_cache_size", v);
+   }
+
+   v = rb_hash_aref(opts, k_compression);
+   if(!NIL_P(v)) {
+     if(v == c_no_compression) options->compression = leveldb::kNoCompression;
+     else if(v == c_snappy_compression) options->compression = leveldb::kSnappyCompression;
+     else rb_raise(rb_eTypeError, "invalid type for %s", rb_id2name(SYM2ID(k_compression)));
+   }
+
+   if(options->compression == leveldb::kNoCompression) rb_iv_set(o_options, "@compression", c_no_compression);
+   else if(options->compression == leveldb::kSnappyCompression) rb_iv_set(o_options, "@compression", c_snappy_compression);
+ }
+
+ /*
+ * call-seq:
+ *   make(pathname, options)
+ *
+ * open level-db database
+ *
+ * [pathname] path for database
+ *
+ * [options[ :create_if_missing ]] create if database doesn't exist
+ *
+ * [options[ :error_if_exists ]] raise error if database exists
+ *
+ * [options[ :paranoid_checks ]] If true, the implementation will do aggressive checking of the
+ *     data it is processing and will stop early if it detects any
+ *     errors. This may have unforeseen ramifications: for example, a
+ *     corruption of one DB entry may cause a large number of entries to
+ *     become unreadable or for the entire DB to become unopenable.
+ *
+ *     Default: false
+ * [options[ :write_buffer_size ]] Amount of data to build up in memory (backed by an unsorted log
+ *     on disk) before converting to a sorted on-disk file.
+ *
+ *     Larger values increase performance, especially during bulk
+ *     loads.
+ *     Up to two write buffers may be held in memory at the same time,
+ *     so you may wish to adjust this parameter to control memory
+ *     usage.
+ *     Also, a larger write buffer will result in a longer recovery
+ *     time the next time the database is opened.
+ *
+ *     Default: 4MB
+ * [options[ :max_open_files ]] Number of open files that can be used by the DB. You may need to
+ *     increase this if your database has a large working set (budget
+ *     one open file per 2MB of working set).
+ *
+ *     Default: 1000
+ * [options[ :block_cache_size ]] Control over blocks (user data is stored in a set of blocks,
+ *     and a block is the unit of reading from disk).
+ *
+ *     If non-nil, use the specified cache size.
+ *     If nil, leveldb will automatically create and use an 8MB
+ *     internal cache.
+ *
+ *     Default: nil
+ * [options[ :block_size ]] Approximate size of user data packed per block. Note that the
+ *     block size specified here corresponds to uncompressed data. The
+ *     actual size of the unit read from disk may be smaller if
+ *     compression is enabled. This parameter can be changed dynamically.
+ *
+ *     Default: 4K
+ * [options[ :block_restart_interval ]] Number of keys between restart points for delta
+ *     encoding of keys.
+ *     This parameter can be changed dynamically.
+ *     Most clients should leave this parameter alone.
+ *
+ *     Default: 16
+ * [options[ :compression ]] LevelDB::CompressionType::SnappyCompression or
+ *     LevelDB::CompressionType::NoCompression.
+ *
+ *     Compress blocks using the specified compression algorithm.
+ *     This parameter can be changed dynamically.
+ *
+ *     Default: LevelDB::CompressionType::SnappyCompression,
+ *     which gives lightweight but fast compression.
+ *
+ *     Typical speeds of SnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
+ *     ~200-500MB/s compression
+ *     ~400-800MB/s decompression
+ *     Note that these speeds are significantly faster than most
+ *     persistent storage speeds, and therefore it is typically never
+ *     worth switching to NoCompression. Even if the input data is
+ *     incompressible, the SnappyCompression implementation will
+ *     efficiently detect that and will switch to uncompressed mode.
+ * [return] LevelDB::DB instance
+ */
+ static VALUE db_make(VALUE self, VALUE v_pathname, VALUE v_options) {
+   Check_Type(v_pathname, T_STRING);
+
+   auto_ptr<bound_db> db(new bound_db);
+   std::string pathname = std::string((char*)RSTRING_PTR(v_pathname));
+
+   leveldb::Options options;
+   VALUE o_options = rb_class_new_instance(0, NULL, c_db_options);
+   set_db_option(o_options, v_options, &options);
+
+   leveldb::Status status = leveldb::DB::Open(options, pathname, &db->db);
+   VALUE o_db = Data_Wrap_Struct(self, NULL, db_free, db.release());
+   RAISE_ON_ERROR(status);
+
+   rb_iv_set(o_db, "@options", o_options);
+   VALUE init_argv[1] = { v_pathname };
+   rb_obj_call_init(o_db, 1, init_argv);
+
+   return o_db;
+ }
+
+ static VALUE db_close(VALUE self) {
+   bound_db* db;
+   Data_Get_Struct(self, bound_db, db);
+
+   if(db->db != NULL) {
+     delete db->db;
+     db->db = NULL;
+   }
+   return Qtrue;
+ }
+
+ static leveldb::ReadOptions parse_read_options(VALUE options) {
+   leveldb::ReadOptions readOptions;
+
+   if(!NIL_P(options)) {
+     Check_Type(options, T_HASH);
+
+     VALUE v_fill = rb_hash_aref(options, k_fill);
+     VALUE v_verify = rb_hash_aref(options, k_verify);
+     VALUE v_snapshot = rb_hash_aref(options, k_snapshot);
+
+     if(!NIL_P(v_fill)) readOptions.fill_cache = RTEST(v_fill);
+     if(!NIL_P(v_verify)) readOptions.verify_checksums = RTEST(v_verify);
+
+     if(!NIL_P(v_snapshot)) {
+       bound_snapshot* sn;
+       Data_Get_Struct(v_snapshot, bound_snapshot, sn);
+       readOptions.snapshot = sn->snapshot;
+     }
+   }
+
+   return readOptions;
+ }
+
+ static leveldb::WriteOptions parse_write_options(VALUE options) {
+   leveldb::WriteOptions writeOptions;
+
+   if(!NIL_P(options)) {
+     Check_Type(options, T_HASH);
+     VALUE v_sync = rb_hash_aref(options, k_sync);
+     if(!NIL_P(v_sync)) writeOptions.sync = RTEST(v_sync);
+   }
+
+   return writeOptions;
+ }
+
+ #define RUBY_STRING_TO_SLICE(x) leveldb::Slice(RSTRING_PTR(x), RSTRING_LEN(x))
+ #define SLICE_TO_RUBY_STRING(x) rb_str_new(x.data(), x.size())
+ #define STRING_TO_RUBY_STRING(x) rb_str_new(x.data(), x.size())
+
+ /*
+ * call-seq:
+ *   get(key, options = nil)
+ *
+ * get data from db
+ *
+ * [key] key you want to get
+ * [options[ :fill_cache ]] Should the data read for this iteration be cached in memory?
+ *     Callers may wish to set this field to false for bulk scans.
+ *
+ *     true or false
+ *
+ *     Default: true
+ * [options[ :verify_checksums ]] If true, all data read from underlying storage will be
+ *     verified against corresponding checksums.
+ *
+ *     Default: false
+ * [options[ :snapshot ]] If value is a Snapshot instance, read from that version of DB.
+ *
+ *     Default: nil
+ * [return] value of stored db
+ */
+ static VALUE db_get(int argc, VALUE* argv, VALUE self) {
+   VALUE v_key, v_options;
+   rb_scan_args(argc, argv, "11", &v_key, &v_options);
+   Check_Type(v_key, T_STRING);
+   leveldb::ReadOptions readOptions = parse_read_options(v_options);
+
+   bound_db* db;
+   Data_Get_Struct(self, bound_db, db);
+
+   leveldb::Slice key = RUBY_STRING_TO_SLICE(v_key);
+   std::string value;
+   leveldb::Status status = db->db->Get(readOptions, key, &value);
+   if(status.IsNotFound()) return Qnil;
+
+   RAISE_ON_ERROR(status);
+   return STRING_TO_RUBY_STRING(value);
+ }
+
+ static VALUE db_delete(int argc, VALUE* argv, VALUE self) {
+   VALUE v_key, v_options;
+   rb_scan_args(argc, argv, "11", &v_key, &v_options);
+   Check_Type(v_key, T_STRING);
+   leveldb::WriteOptions writeOptions = parse_write_options(v_options);
+
+   bound_db* db;
+   Data_Get_Struct(self, bound_db, db);
+
+   leveldb::Slice key = RUBY_STRING_TO_SLICE(v_key);
+   std::string value;
+   leveldb::Status status = db->db->Get(uncached_read_options, key, &value);
+
+   if(status.IsNotFound()) return Qnil;
+
+   status = db->db->Delete(writeOptions, key);
+   RAISE_ON_ERROR(status);
+
+   return STRING_TO_RUBY_STRING(value);
+ }
+
+ static VALUE db_exists(VALUE self, VALUE v_key) {
+   Check_Type(v_key, T_STRING);
+
+   bound_db* db;
+   Data_Get_Struct(self, bound_db, db);
+
+   leveldb::Slice key = RUBY_STRING_TO_SLICE(v_key);
+   std::string value;
+   leveldb::Status status = db->db->Get(leveldb::ReadOptions(), key, &value);
+
+   if(status.IsNotFound()) return Qfalse;
+   return Qtrue;
+ }
+
+ /*
+ * call-seq:
+ *   put(key, value, options = nil)
+ *
+ * store data into DB
+ *
+ * [key] key you want to store
+ * [value] data you want to store
+ * [options[ :sync ]] If true, the write will be flushed from the operating system
+ *     buffer cache (by calling WritableFile::Sync()) before the write
+ *     is considered complete. If this flag is true, writes will be
+ *     slower.
+ *
+ *     If this flag is false, and the machine crashes, some recent
+ *     writes may be lost. Note that if it is just the process that
+ *     crashes (i.e., the machine does not reboot), no writes will be
+ *     lost even if sync==false.
+ *
+ *     In other words, a DB write with sync==false has similar
+ *     crash semantics to the "write()" system call. A DB write
+ *     with sync==true has similar crash semantics to a "write()"
+ *     system call followed by "fsync()".
+ *
+ *     Default: false
+ * [return] stored value
+ */
+ static VALUE db_put(int argc, VALUE* argv, VALUE self) {
+   VALUE v_key, v_value, v_options;
+
+   rb_scan_args(argc, argv, "21", &v_key, &v_value, &v_options);
+   Check_Type(v_key, T_STRING);
+   Check_Type(v_value, T_STRING);
+   leveldb::WriteOptions writeOptions = parse_write_options(v_options);
+
+   bound_db* db;
+   Data_Get_Struct(self, bound_db, db);
+
+   leveldb::Slice key = RUBY_STRING_TO_SLICE(v_key);
+   leveldb::Slice value = RUBY_STRING_TO_SLICE(v_value);
+   leveldb::Status status = db->db->Put(writeOptions, key, value);
+
+   RAISE_ON_ERROR(status);
+
+   return v_value;
+ }
+
+ static VALUE db_size(VALUE self) {
+   long count = 0;
+
+   bound_db* db;
+   Data_Get_Struct(self, bound_db, db);
+   leveldb::Iterator* it = db->db->NewIterator(uncached_read_options);
+
+   // apparently this is how we have to do it. slow and painful!
+   for (it->SeekToFirst(); it->Valid(); it->Next()) count++;
+   RAISE_ON_ERROR(it->status());
+   delete it;
+   return INT2NUM(count);
+ }
+
+ static VALUE db_init(VALUE self, VALUE v_pathname) {
+   rb_iv_set(self, "@pathname", v_pathname);
+   return self;
+ }
+
+ typedef struct current_iteration {
+   leveldb::Iterator* iterator;
+   bool passed_limit;
+   bool check_limit;
+   bool reversed;
+   int checked_valid; // 0 = unchecked, 1 = valid, -1 = iterator invalid, -2 = passed limit
+   std::string key_to_str;
+   leveldb::Slice current_key;
+ } current_iteration;
+
+ static void current_iteration_free(current_iteration* iter) {
+   delete iter;
+ }
+
+ static VALUE iter_make(VALUE klass, VALUE db, VALUE options) {
+   if(c_db != rb_funcall(db, k_class, 0)) {
+     rb_raise(rb_eArgError, "db must be a LevelDB::DB");
+   }
+
+   bound_db* b_db;
+   Data_Get_Struct(db, bound_db, b_db);
+
+   leveldb::ReadOptions read_options = parse_read_options(options);
+   read_options.fill_cache = false;
+
+   current_iteration* iter = new current_iteration;
+   iter->passed_limit = false;
+   iter->check_limit = false;
+   iter->checked_valid = 0;
+   iter->iterator = b_db->db->NewIterator(read_options);
+
+   VALUE o_iter = Data_Wrap_Struct(klass, NULL, current_iteration_free, iter);
+
+   VALUE argv[2];
+   argv[0] = db;
+   argv[1] = options;
+   rb_obj_call_init(o_iter, 2, argv);
+
+   return o_iter;
+ }
+
+ static VALUE iter_init(VALUE self, VALUE db, VALUE options) {
+   if(c_db != rb_funcall(db, k_class, 0)) {
+     rb_raise(rb_eArgError, "db must be a LevelDB::DB");
+   }
+
+   rb_iv_set(self, "@db", db);
+   current_iteration* iter;
+   Data_Get_Struct(self, current_iteration, iter);
+
+   VALUE key_from = Qnil;
+   VALUE key_to = Qnil;
+
+   if(!NIL_P(options)) {
+     Check_Type(options, T_HASH);
+     key_from = rb_hash_aref(options, k_from);
+     key_to = rb_hash_aref(options, k_to);
+
+     if(RTEST(key_to)) {
+       iter->check_limit = true;
+       iter->key_to_str = RUBY_STRING_TO_SLICE(rb_funcall(key_to, k_to_s, 0)).ToString();
+     }
+
+     rb_iv_set(self, "@from", key_from);
+     rb_iv_set(self, "@to", key_to);
+     if(NIL_P(rb_hash_aref(options, k_reversed))) {
+       iter->reversed = false;
+       rb_iv_set(self, "@reversed", false);
+     } else {
+       iter->reversed = true;
+       rb_iv_set(self, "@reversed", true);
+     }
+   }
+
+   if(RTEST(key_from)) {
+     iter->iterator->Seek(RUBY_STRING_TO_SLICE(rb_funcall(key_from, k_to_s, 0)));
+   } else {
+     if(iter->reversed) {
+       iter->iterator->SeekToLast();
+     } else {
+       iter->iterator->SeekToFirst();
+     }
+   }
+
+   return self;
+ }
+
+ static bool iter_valid(current_iteration* iter) {
+   if(iter->checked_valid == 0) {
+     if(iter->passed_limit) {
+       iter->checked_valid = -2;
+     } else {
+       if(iter->iterator->Valid()) {
+         iter->current_key = iter->iterator->key();
+
+         if(iter->check_limit &&
+             (iter->reversed ?
+              (iter->current_key.ToString() < iter->key_to_str) :
+              (iter->current_key.ToString() > iter->key_to_str))) {
+           iter->passed_limit = true;
+           iter->checked_valid = -2;
+         } else {
+           iter->checked_valid = 1;
+         }
+
+       } else {
+         iter->checked_valid = -1;
+       }
+     }
+   }
+
+   if(iter->checked_valid == 1)
+     return true;
+   else
+     return false;
+ }
+
+ static VALUE iter_invalid_reason(VALUE self) {
+   current_iteration* iter;
+   Data_Get_Struct(self, current_iteration, iter);
+   if(iter_valid(iter)) {
+     return Qnil;
+   } else {
+     return INT2FIX(iter->checked_valid);
+   }
+ }
+
+ static VALUE iter_next_value(current_iteration* iter) {
+   VALUE arr = rb_ary_new2(2);
+   rb_ary_push(arr, SLICE_TO_RUBY_STRING(iter->current_key));
+   rb_ary_push(arr, SLICE_TO_RUBY_STRING(iter->iterator->value()));
+   return arr;
+ }
+
+ static void iter_scan_iterator(current_iteration* iter) {
+   if(iter->reversed)
+     iter->iterator->Prev();
+   else
+     iter->iterator->Next();
+   iter->checked_valid = 0;
+ }
+
+ static VALUE iter_peek(VALUE self) {
+   current_iteration* iter;
+   Data_Get_Struct(self, current_iteration, iter);
+   if(iter_valid(iter)) {
+     return iter_next_value(iter);
+   } else {
+     return Qnil;
+   }
+ }
+
+ static VALUE iter_scan(VALUE self) {
+   current_iteration* iter;
+   Data_Get_Struct(self, current_iteration, iter);
+   if(iter_valid(iter))
+     iter_scan_iterator(iter);
+   return Qnil;
+ }
+
+ static VALUE iter_next(VALUE self) {
+   current_iteration* iter;
+   Data_Get_Struct(self, current_iteration, iter);
+
+   VALUE arr = Qnil;
+
+   if(iter_valid(iter)) {
+     arr = iter_next_value(iter);
+     iter_scan_iterator(iter);
+   }
+
+   return arr;
+ }
+
+ static VALUE iter_each(VALUE self) {
+   current_iteration* iter;
+   Data_Get_Struct(self, current_iteration, iter);
+
+   while(iter_valid(iter)) {
+     rb_yield(iter_next_value(iter));
+     iter_scan_iterator(iter);
+   }
+
+   RAISE_ON_ERROR(iter->iterator->status());
+   delete iter->iterator;
+   return self;
+ }
+
+ typedef struct bound_batch {
+   leveldb::WriteBatch batch;
+ } bound_batch;
+
+ static void batch_free(bound_batch* batch) {
+   delete batch;
+ }
+
+ static VALUE batch_make(VALUE klass) {
+   bound_batch* batch = new bound_batch;
+   batch->batch = leveldb::WriteBatch();
+
+   VALUE o_batch = Data_Wrap_Struct(klass, NULL, batch_free, batch);
+   VALUE argv[0];
+   rb_obj_call_init(o_batch, 0, argv);
+
+   return o_batch;
+ }
+
+ static VALUE batch_put(VALUE self, VALUE v_key, VALUE v_value) {
+   Check_Type(v_key, T_STRING);
+   Check_Type(v_value, T_STRING);
+
+   bound_batch* batch;
+   Data_Get_Struct(self, bound_batch, batch);
+   batch->batch.Put(RUBY_STRING_TO_SLICE(v_key), RUBY_STRING_TO_SLICE(v_value));
+
+   return v_value;
+ }
+
+ static VALUE batch_delete(VALUE self, VALUE v_key) {
+   Check_Type(v_key, T_STRING);
+   bound_batch* batch;
+   Data_Get_Struct(self, bound_batch, batch);
+   batch->batch.Delete(RUBY_STRING_TO_SLICE(v_key));
+   return Qtrue;
+ }
+
+ static VALUE db_batch(int argc, VALUE* argv, VALUE self) {
+   VALUE o_batch = batch_make(c_batch);
+
+   rb_yield(o_batch);
+
+   bound_batch* batch;
+   bound_db* db;
+   Data_Get_Struct(o_batch, bound_batch, batch);
+   Data_Get_Struct(self, bound_db, db);
+
+   VALUE v_options;
+   rb_scan_args(argc, argv, "01", &v_options);
+   leveldb::WriteOptions writeOptions = parse_write_options(v_options);
+
+   leveldb::Status status = db->db->Write(writeOptions, &batch->batch);
+   RAISE_ON_ERROR(status);
+   return Qtrue;
+ }
+
+ static void bound_snapshot_mark(bound_snapshot* b_sn) {
+   rb_gc_mark(b_sn->v_db);
+ }
+
+ static void bound_snapshot_free(bound_snapshot* b_sn) {
+   if (b_sn->snapshot && rb_during_gc()) {
+     bound_db* b_db;
+     Data_Get_Struct(b_sn->v_db, bound_db, b_db);
+     b_db->db->ReleaseSnapshot(b_sn->snapshot);
+   }
+   // If not rb_during_gc, then ruby vm is finalizing, and db either has been freed
+   // (in which case we can't call ReleaseSnapshot) or is about to be freed (in which
+   // case we don't need to).
+   delete b_sn;
+ }
+
+ static VALUE snapshot_make(VALUE klass, VALUE v_db) {
+   if (c_db != rb_funcall(v_db, k_class, 0)) {
+     rb_raise(rb_eArgError, "db must be a LevelDB::DB");
+   }
+
+   bound_db* b_db;
+   Data_Get_Struct(v_db, bound_db, b_db);
+
+   bound_snapshot* b_sn = new bound_snapshot;
+   b_sn->snapshot = b_db->db->GetSnapshot();
+   b_sn->v_db = v_db;
+   VALUE o_snapshot = Data_Wrap_Struct(klass, bound_snapshot_mark, bound_snapshot_free, b_sn);
+
+   VALUE argv[1];
+   argv[0] = v_db;
+   rb_obj_call_init(o_snapshot, 1, argv);
+
+   return o_snapshot;
+ }
+
+ static VALUE snapshot_init(VALUE self, VALUE v_db) {
+   return self;
+ }
+
+ /*
+ * call-seq:
+ *   db()
+ *
+ * [return] the db that the snapshot references.
+ */
+ static VALUE snapshot_db(VALUE self) {
+   bound_snapshot* b_sn;
+   Data_Get_Struct(self, bound_snapshot, b_sn);
+   return b_sn->v_db;
+ }
+
+ /*
+ * call-seq:
+ *   release()
+ *
+ * Release the snapshot; after calling this method, the snapshot can still be used,
+ * but it reads from the current database state.
+ *
+ * [return] self.
+ */
+ static VALUE snapshot_release(VALUE self) {
+   bound_snapshot* b_sn;
+   Data_Get_Struct(self, bound_snapshot, b_sn);
+
+   if (b_sn->snapshot) {
+     bound_db* b_db;
+     Data_Get_Struct(b_sn->v_db, bound_db, b_db);
+     b_db->db->ReleaseSnapshot(b_sn->snapshot);
+     b_sn->snapshot = NULL;
+   }
+
+   return self;
+ }
+
+ /*
+ * call-seq:
+ *   released?()
+ *
+ * [return] true if the snapshot has been released, false otherwise.
+ */
+ static VALUE snapshot_released(VALUE self) {
+   bound_snapshot* b_sn;
+   Data_Get_Struct(self, bound_snapshot, b_sn);
+   return b_sn->snapshot ? Qfalse : Qtrue;
+ }
+
+ /*
+ * call-seq:
+ *   exists?()
+ *
+ * [return] true if the key exists in the snapshot of the db, false otherwise.
+ */
+ static VALUE snapshot_exists(VALUE self, VALUE v_key) {
+   Check_Type(v_key, T_STRING);
+
+   bound_snapshot* b_sn;
+   Data_Get_Struct(self, bound_snapshot, b_sn);
+
+   leveldb::Slice key = RUBY_STRING_TO_SLICE(v_key);
+   std::string value;
+   leveldb::ReadOptions options;
+   options.snapshot = b_sn->snapshot;
+
+   bound_db* b_db;
+   Data_Get_Struct(b_sn->v_db, bound_db, b_db);
+   leveldb::Status status = b_db->db->Get(options, key, &value);
+
+   if(status.IsNotFound()) return Qfalse;
+   return Qtrue;
+ }
+
+ extern "C" {
+ void Init_leveldb_native() {
+   k_fill = ID2SYM(rb_intern("fill_cache"));
+   k_verify = ID2SYM(rb_intern("verify_checksums"));
+   k_snapshot = ID2SYM(rb_intern("snapshot"));
+   k_sync = ID2SYM(rb_intern("sync"));
+   k_from = ID2SYM(rb_intern("from"));
+   k_to = ID2SYM(rb_intern("to"));
+   k_reversed = ID2SYM(rb_intern("reversed"));
+   k_class = rb_intern("class");
+   k_name = rb_intern("name");
+   k_create_if_missing = ID2SYM(rb_intern("create_if_missing"));
+   k_error_if_exists = ID2SYM(rb_intern("error_if_exists"));
+   k_paranoid_checks = ID2SYM(rb_intern("paranoid_checks"));
+   k_write_buffer_size = ID2SYM(rb_intern("write_buffer_size"));
+   k_block_cache_size = ID2SYM(rb_intern("block_cache_size"));
+   k_block_size = ID2SYM(rb_intern("block_size"));
+   k_block_restart_interval = ID2SYM(rb_intern("block_restart_interval"));
+   k_compression = ID2SYM(rb_intern("compression"));
+   k_max_open_files = ID2SYM(rb_intern("max_open_files"));
+   k_to_s = rb_intern("to_s");
+
+   uncached_read_options = leveldb::ReadOptions();
+   uncached_read_options.fill_cache = false;
+
+   m_leveldb = rb_define_module("LevelDBNative");
+
+   c_db = rb_define_class_under(m_leveldb, "DB", rb_cObject);
+   rb_define_singleton_method(c_db, "make", RUBY_METHOD_FUNC(db_make), 2);
+   rb_define_method(c_db, "initialize", RUBY_METHOD_FUNC(db_init), 1);
+   rb_define_method(c_db, "get", RUBY_METHOD_FUNC(db_get), -1);
+   rb_define_method(c_db, "delete", RUBY_METHOD_FUNC(db_delete), -1);
+   rb_define_method(c_db, "put", RUBY_METHOD_FUNC(db_put), -1);
+   rb_define_method(c_db, "exists?", RUBY_METHOD_FUNC(db_exists), 1);
+   rb_define_method(c_db, "close", RUBY_METHOD_FUNC(db_close), 0);
+   rb_define_method(c_db, "size", RUBY_METHOD_FUNC(db_size), 0);
+   rb_define_method(c_db, "batch", RUBY_METHOD_FUNC(db_batch), -1);
+
+   c_iter = rb_define_class_under(m_leveldb, "Iterator", rb_cObject);
+   rb_define_singleton_method(c_iter, "make", RUBY_METHOD_FUNC(iter_make), 2);
+   rb_define_method(c_iter, "initialize", RUBY_METHOD_FUNC(iter_init), 2);
+   rb_define_method(c_iter, "each", RUBY_METHOD_FUNC(iter_each), 0);
+   rb_define_method(c_iter, "next", RUBY_METHOD_FUNC(iter_next), 0);
+   rb_define_method(c_iter, "scan", RUBY_METHOD_FUNC(iter_scan), 0);
+   rb_define_method(c_iter, "peek", RUBY_METHOD_FUNC(iter_peek), 0);
+   rb_define_method(c_iter, "invalid_reason", RUBY_METHOD_FUNC(iter_invalid_reason), 0);
+
+   c_batch = rb_define_class_under(m_leveldb, "WriteBatch", rb_cObject);
+   rb_define_singleton_method(c_batch, "make", RUBY_METHOD_FUNC(batch_make), 0);
+   rb_define_method(c_batch, "put", RUBY_METHOD_FUNC(batch_put), 2);
+   rb_define_method(c_batch, "delete", RUBY_METHOD_FUNC(batch_delete), 1);
+
+   c_db_options = rb_define_class_under(m_leveldb, "Options", rb_cObject);
+
+   c_snapshot = rb_define_class_under(m_leveldb, "Snapshot", rb_cObject);
+   rb_define_singleton_method(c_snapshot, "make", RUBY_METHOD_FUNC(snapshot_make), 1);
+   rb_define_method(c_snapshot, "initialize", RUBY_METHOD_FUNC(snapshot_init), 1);
+   rb_define_method(c_snapshot, "db", RUBY_METHOD_FUNC(snapshot_db), 0);
+   rb_define_method(c_snapshot, "release", RUBY_METHOD_FUNC(snapshot_release), 0);
+   rb_define_method(c_snapshot, "released?", RUBY_METHOD_FUNC(snapshot_released), 0);
+   rb_define_method(c_snapshot, "exists?", RUBY_METHOD_FUNC(snapshot_exists), 1);
+
+   VALUE m_ctype = rb_define_module_under(m_leveldb, "CompressionType");
+   VALUE c_base = rb_define_class_under(m_ctype, "Base", rb_cObject);
+   c_no_compression = rb_define_class_under(m_ctype, "NoCompression", c_base);
+   c_snappy_compression = rb_define_class_under(m_ctype, "SnappyCompression", c_base);
+
+   c_error = rb_define_class_under(m_leveldb, "Error", rb_eStandardError);
+ }
+ }
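
For reference, here is a minimal usage sketch of the API that this new file binds, using only the classes and methods registered in Init_leveldb_native above. It is illustrative, not part of the gem: it assumes the extension is already compiled and that require "leveldb-native" loads it, and the database path is made up for the example. Keys and values must be Strings, and the hash-rocket syntax matches the extension's stated 1.8/1.9 support.

    # Illustrative sketch of the native API defined above (LevelDBNative::*).
    # Assumptions: the compiled extension is loadable via the gem's main file,
    # and "/tmp/example-db" is a throwaway path used only for illustration.
    require "leveldb-native"

    db = LevelDBNative::DB.make "/tmp/example-db", :create_if_missing => true

    db.put "a", "1", :sync => true   # :sync maps to the LevelDB sync write option
    db.put "b", "2"
    db.get "a"                       # => "1"
    db.get "missing"                 # => nil (NotFound is mapped to nil, not an error)
    db.exists? "b"                   # => true

    # Batched writes go through LevelDBNative::WriteBatch via DB#batch.
    db.batch { |b| b.put "c", "3"; b.delete "a" }

    # Range iteration; :from / :to / :reversed are the keys read in iter_init.
    iter = LevelDBNative::Iterator.make db, :from => "a", :to => "z"
    iter.each { |key, value| puts "#{key} => #{value}" }

    # Snapshots pin a read view; pass one to get via the :snapshot read option.
    snap = LevelDBNative::Snapshot.make db
    db.put "c", "30"
    db.get "c", :snapshot => snap    # => "3" (value as of the snapshot)
    snap.release
    snap.released?                   # => true

    db.close
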
lib/leveldb-native/version.rb CHANGED
@@ -1,3 +1,3 @@
  module LevelDBNative
-   VERSION = "0.2"
+   VERSION = "0.3"
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: leveldb-native
  version: !ruby/object:Gem::Version
-   version: '0.2'
+   version: '0.3'
  platform: ruby
  authors:
  - Joel VanderWerf
@@ -25,6 +25,7 @@ files:
  - lib/leveldb-native.rb
  - lib/leveldb-native/version.rb
  - ext/leveldb-native/extconf.rb
+ - ext/leveldb-native/leveldb-native.cc
  - example/snapshot.rb
  - test/test-db.rb
  - test/test-db-options.rb