@nxtedition/rocksdb 7.0.43 → 7.0.46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/binding.cc CHANGED
@@ -180,6 +180,7 @@ static napi_status ToString(napi_env env, napi_value from, rocksdb::PinnableSlic
  size_t length = 0;
  NAPI_STATUS_RETURN(napi_get_buffer_info(env, from, reinterpret_cast<void**>(&buf), &length));

+ // TODO (fix): Should extend life of "from". Or "to" should be a non-pinnable slice.
  to.PinSlice(rocksdb::Slice(buf, length), noop, nullptr, nullptr);
  } else {
  return napi_invalid_arg;
@@ -248,7 +249,7 @@ napi_status Convert(napi_env env, T&& s, bool asBuffer, napi_value& result) {
  if (!s) {
  return napi_get_null(env, &result);
  } else if (asBuffer) {
- using Y = typename std::remove_pointer<typename std::decay<decltype(*s)>::type>::type;
+ using Y = typename std::decay<decltype(*s)>::type;
  auto ptr = new Y(std::move(*s));
  return napi_create_external_buffer(env, ptr->size(), const_cast<char*>(ptr->data()), Finalize<Y>, ptr, &result);
  } else {
@@ -328,6 +329,13 @@ struct Worker {
  rocksdb::Status status_;
  };

+ struct ColumnFamily {
+ napi_ref ref;
+ napi_value val;
+ rocksdb::ColumnFamilyHandle* handle;
+ rocksdb::ColumnFamilyDescriptor descriptor;
+ };
+
  struct Database {
  void AttachIterator(napi_env env, Iterator* iterator) {
  iterators_.insert(iterator);
@@ -366,13 +374,228 @@ struct Database {
  Worker* pendingCloseWorker_;
  std::set<Iterator*> iterators_;
  std::set<Updates*> updates_;
- std::vector<rocksdb::ColumnFamilyHandle*> columns_;
+ std::map<int32_t, ColumnFamily> columns_;
  napi_ref priorityRef_;

  private:
  uint32_t priorityWork_ = 0;
  };

+ enum BatchOp { Empty, Put, Delete, Merge, Data };
+
+ struct BatchEntry {
+ BatchOp op = BatchOp::Empty;
+ std::optional<std::string> key;
+ std::optional<std::string> val;
+ std::optional<ColumnFamily> column;
+ };
+
+ struct BatchIterator : public rocksdb::WriteBatch::Handler {
+ BatchIterator(Database* database,
+ bool keys = true,
+ bool values = true,
+ bool data = true,
+ const rocksdb::ColumnFamilyHandle* column = nullptr,
+ bool keyAsBuffer = false,
+ bool valueAsBuffer = false)
+ : database_(database),
+ keys_(keys),
+ values_(values),
+ data_(data),
+ column_(column),
+ keyAsBuffer_(keyAsBuffer),
+ valueAsBuffer_(valueAsBuffer) {}
+
+ napi_status Iterate(napi_env env, const rocksdb::WriteBatch& batch, napi_value* result) {
+ cache_.reserve(batch.Count());
+ batch.Iterate(this); // TODO (fix): Error?
+
+ napi_value putStr;
+ NAPI_STATUS_RETURN(napi_create_string_utf8(env, "put", NAPI_AUTO_LENGTH, &putStr));
+
+ napi_value delStr;
+ NAPI_STATUS_RETURN(napi_create_string_utf8(env, "del", NAPI_AUTO_LENGTH, &delStr));
+
+ napi_value mergeStr;
+ NAPI_STATUS_RETURN(napi_create_string_utf8(env, "merge", NAPI_AUTO_LENGTH, &mergeStr));
+
+ napi_value dataStr;
+ NAPI_STATUS_RETURN(napi_create_string_utf8(env, "data", NAPI_AUTO_LENGTH, &dataStr));
+
+ napi_value nullVal;
+ NAPI_STATUS_RETURN(napi_get_null(env, &nullVal));
+
+ NAPI_STATUS_RETURN(napi_create_array_with_length(env, cache_.size() * 4, result));
+ for (size_t n = 0; n < cache_.size(); ++n) {
+ napi_value op;
+ if (cache_[n].op == BatchOp::Put) {
+ op = putStr;
+ } else if (cache_[n].op == BatchOp::Delete) {
+ op = delStr;
+ } else if (cache_[n].op == BatchOp::Merge) {
+ op = mergeStr;
+ } else if (cache_[n].op == BatchOp::Data) {
+ op = dataStr;
+ } else {
+ continue;
+ }
+
+ NAPI_STATUS_RETURN(napi_set_element(env, *result, n * 4 + 0, op));
+
+ napi_value key;
+ NAPI_STATUS_RETURN(Convert(env, cache_[n].key, keyAsBuffer_, key));
+ NAPI_STATUS_RETURN(napi_set_element(env, *result, n * 4 + 1, key));
+
+ napi_value val;
+ NAPI_STATUS_RETURN(Convert(env, cache_[n].val, valueAsBuffer_, val));
+ NAPI_STATUS_RETURN(napi_set_element(env, *result, n * 4 + 2, val));
+
+ // TODO (fix)
+ // napi_value column = cache_[n].column ? cache_[n].column->val : nullVal;
+ NAPI_STATUS_RETURN(napi_set_element(env, *result, n * 4 + 3, nullVal));
+ }
+
+ return napi_ok;
+ }
+
+ rocksdb::Status PutCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
+ if (column_ && column_->GetID() != column_family_id) {
+ return rocksdb::Status::OK();
+ }
+
+ BatchEntry entry;
+
+ entry.op = BatchOp::Put;
+
+ if (keys_) {
+ entry.key = key.ToStringView();
+ }
+
+ if (values_) {
+ entry.val = value.ToStringView();
+ }
+
+ // if (database_ && database_->columns_.find(column_family_id) != database_->columns_.end()) {
+ // entry.column = database_->columns_[column_family_id];
+ // }
+
+ cache_.push_back(entry);
+
+ return rocksdb::Status::OK();
+ }
+
+ rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
+ if (column_ && column_->GetID() != column_family_id) {
+ return rocksdb::Status::OK();
+ }
+
+ BatchEntry entry;
+
+ entry.op = BatchOp::Delete;
+
+ if (keys_) {
+ entry.key = key.ToStringView();
+ }
+
+ // if (database_ && database_->columns_.find(column_family_id) != database_->columns_.end()) {
+ // entry.column = database_->columns_[column_family_id];
+ // }
+
+ cache_.push_back(entry);
+
+ return rocksdb::Status::OK();
+ }
+
+ rocksdb::Status MergeCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
+ if (column_ && column_->GetID() != column_family_id) {
+ return rocksdb::Status::OK();
+ }
+
+ BatchEntry entry;
+
+ entry.op = BatchOp::Merge;
+
+ if (keys_) {
+ entry.key = key.ToStringView();
+ }
+
+ if (values_) {
+ entry.val = value.ToStringView();
+ }
+
+ // if (database_ && database_->columns_.find(column_family_id) != database_->columns_.end()) {
+ // entry.column = database_->columns_[column_family_id];
+ // }
+
+ cache_.push_back(entry);
+
+ return rocksdb::Status::OK();
+ }
+
+ void LogData(const rocksdb::Slice& data) override {
+ if (!data_) {
+ return;
+ }
+
+ BatchEntry entry;
+
+ entry.op = BatchOp::Data;
+
+ entry.val = data.ToStringView();
+
+ cache_.push_back(entry);
+ }
+
+ bool Continue() override { return true; }
+
+ private:
+ Database* database_;
+ bool keys_;
+ bool values_;
+ bool data_;
+ const rocksdb::ColumnFamilyHandle* column_;
+ bool keyAsBuffer_;
+ bool valueAsBuffer_;
+ std::vector<BatchEntry> cache_;
+ };
+
+ struct Updates : public BatchIterator {
+ Updates(Database* database,
+ int64_t seqNumber,
+ bool keys,
+ bool values,
+ bool data,
+ const rocksdb::ColumnFamilyHandle* column,
+ bool keyAsBuffer,
+ bool valueAsBuffer)
+ : BatchIterator(database, keys, values, data, column, keyAsBuffer, valueAsBuffer),
+ database_(database),
+ sequence_(seqNumber),
+ start_(seqNumber) {}
+
+ void Close() { iterator_.reset(); }
+
+ void Attach(napi_env env, napi_value context) {
+ napi_create_reference(env, context, 1, &ref_);
+ database_->AttachUpdates(env, this);
+ }
+
+ void Detach(napi_env env) {
+ database_->DetachUpdates(env, this);
+ if (ref_) {
+ napi_delete_reference(env, ref_);
+ }
+ }
+
+ Database* database_;
+ int64_t sequence_;
+ int64_t start_;
+ std::unique_ptr<rocksdb::TransactionLogIterator> iterator_;
+
+ private:
+ napi_ref ref_ = nullptr;
+ };
+
  struct BaseIterator {
  BaseIterator(Database* database,
  rocksdb::ColumnFamilyHandle* column,
@@ -561,53 +784,10 @@ struct Iterator final : public BaseIterator {
  napi_ref ref_ = nullptr;
  };

- struct Updates {
- Updates(Database* database,
- int64_t seqNumber,
- bool keys,
- bool values,
- bool data,
- const rocksdb::ColumnFamilyHandle* column)
- : database_(database),
- sequence_(seqNumber),
- start_(seqNumber),
- keys_(keys),
- values_(values),
- data_(data),
- column_(column) {}
-
- void Close() { iterator_.reset(); }
-
- void Attach(napi_env env, napi_value context) {
- napi_create_reference(env, context, 1, &ref_);
- database_->AttachUpdates(env, this);
- }
-
- void Detach(napi_env env) {
- database_->DetachUpdates(env, this);
- if (ref_) {
- napi_delete_reference(env, ref_);
- }
- }
-
- Database* database_;
- int64_t sequence_;
- int64_t start_;
- std::unique_ptr<rocksdb::TransactionLogIterator> iterator_;
- bool keys_;
- bool values_;
- bool data_;
- const rocksdb::ColumnFamilyHandle* column_;
-
- private:
- napi_ref ref_ = nullptr;
- };
-
  static napi_status GetColumnFamily(Database* database,
  napi_env env,
  napi_value options,
- rocksdb::ColumnFamilyHandle** column,
- bool fallback = true) {
+ rocksdb::ColumnFamilyHandle** column) {
  bool hasColumn = false;
  NAPI_STATUS_RETURN(napi_has_named_property(env, options, "column", &hasColumn));

@@ -615,7 +795,7 @@ static napi_status GetColumnFamily(Database* database,
  napi_value value = nullptr;
  NAPI_STATUS_RETURN(napi_get_named_property(env, options, "column", &value));
  NAPI_STATUS_RETURN(napi_get_value_external(env, value, reinterpret_cast<void**>(column)));
- } else if (fallback) {
+ } else if (database) {
  *column = database->db_->DefaultColumnFamily();
  } else {
  *column = nullptr;
@@ -640,18 +820,18 @@ static void env_cleanup_hook(void* arg) {
  // following code must be a safe noop if called before db_open() or after
  // db_close().
  if (database && database->db_) {
- for (auto it : database->iterators_) {
+ for (auto& it : database->iterators_) {
  // TODO: does not do `napi_delete_reference`. Problem?
  it->Close();
  }

- for (auto it : database->updates_) {
+ for (auto& it : database->updates_) {
  // TODO: does not do `napi_delete_reference`. Problem?
  it->Close();
  }

- for (auto it : database->columns_) {
- database->db_->DestroyColumnFamilyHandle(it);
+ for (auto& it : database->columns_) {
+ database->db_->DestroyColumnFamilyHandle(it.second.handle);
  }

  // Having closed the iterators (and released snapshots) we can safely close.
@@ -665,6 +845,9 @@ static void FinalizeDatabase(napi_env env, void* data, void* hint) {
  napi_remove_env_cleanup_hook(env, env_cleanup_hook, database);
  if (database->priorityRef_)
  napi_delete_reference(env, database->priorityRef_);
+ for (auto& it : database->columns_) {
+ napi_delete_reference(env, it.second.ref);
+ }
  delete database;
  }
  }
@@ -687,17 +870,16 @@ struct OpenWorker final : public Worker {
  napi_value callback,
  const std::string& location,
  const rocksdb::Options& options,
- std::vector<rocksdb::ColumnFamilyDescriptor> column_families)
+ std::vector<rocksdb::ColumnFamilyDescriptor> descriptors)
  : Worker(env, database, callback, "leveldown.db.open"),
  options_(options),
  location_(location),
- column_families_(std::move(column_families)) {}
+ descriptors_(std::move(descriptors)) {}

  rocksdb::Status Execute(Database& database) override {
  rocksdb::DB* db = nullptr;
- const auto status = column_families_.empty()
- ? rocksdb::DB::Open(options_, location_, &db)
- : rocksdb::DB::Open(options_, location_, column_families_, &database.columns_, &db);
+ const auto status = descriptors_.empty() ? rocksdb::DB::Open(options_, location_, &db)
+ : rocksdb::DB::Open(options_, location_, descriptors_, &handles_, &db);
  database.db_.reset(db);
  return status;
  }
@@ -706,13 +888,19 @@ struct OpenWorker final : public Worker {
  napi_value argv[2];
  NAPI_STATUS_RETURN(napi_get_null(env, &argv[0]));

- const auto size = database_->columns_.size();
+ const auto size = handles_.size();
  NAPI_STATUS_RETURN(napi_create_object(env, &argv[1]));

  for (size_t n = 0; n < size; ++n) {
- napi_value column;
- NAPI_STATUS_RETURN(napi_create_external(env, database_->columns_[n], nullptr, nullptr, &column));
- NAPI_STATUS_RETURN(napi_set_named_property(env, argv[1], column_families_[n].name.c_str(), column));
+ ColumnFamily column;
+ column.handle = handles_[n];
+ column.descriptor = descriptors_[n];
+ NAPI_STATUS_RETURN(napi_create_external(env, column.handle, nullptr, nullptr, &column.val));
+ NAPI_STATUS_RETURN(napi_create_reference(env, column.val, 1, &column.ref));
+
+ NAPI_STATUS_RETURN(napi_set_named_property(env, argv[1], descriptors_[n].name.c_str(), column.val));
+
+ database_->columns_[column.handle->GetID()] = column;
  }

  return CallFunction(env, callback, 2, argv);
@@ -720,7 +908,8 @@ struct OpenWorker final : public Worker {

  rocksdb::Options options_;
  const std::string location_;
- std::vector<rocksdb::ColumnFamilyDescriptor> column_families_;
+ std::vector<rocksdb::ColumnFamilyDescriptor> descriptors_;
+ std::vector<rocksdb::ColumnFamilyHandle*> handles_;
  };

  template <typename T, typename U>
@@ -939,8 +1128,8 @@ struct CloseWorker final : public Worker {
  : Worker(env, database, callback, "leveldown.db.close") {}

  rocksdb::Status Execute(Database& database) override {
- for (auto it : database.columns_) {
- database.db_->DestroyColumnFamilyHandle(it);
+ for (auto& it : database.columns_) {
+ database.db_->DestroyColumnFamilyHandle(it.second.handle);
  }

  return database.db_->Close();
@@ -968,7 +1157,7 @@ NAPI_METHOD(db_close) {
  return 0;
  }

- struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Worker {
+ struct UpdatesNextWorker final : public Worker {
  UpdatesNextWorker(napi_env env, Updates* updates, napi_value callback)
  : Worker(env, updates->database_, callback, "rocks_level.db.get"), updates_(updates) {
  database_->IncrementPriorityWork(env);
@@ -993,10 +1182,10 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Wor

  updates_->sequence_ = batch.sequence;

- count_ = batch.writeBatchPtr->Count();
- cache_.reserve(batch.writeBatchPtr->Count() * 4);
+ batch_ = std::move(batch.writeBatchPtr);
+ count_ = batch_->Count();

- return batch.writeBatchPtr->Iterate(this);
+ return rocksdb::Status::OK();
  }

  napi_status OnOk(napi_env env, napi_value callback) override {
@@ -1008,12 +1197,7 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Wor
  return CallFunction(env, callback, 1, argv);
  }

- NAPI_STATUS_RETURN(napi_create_array_with_length(env, cache_.size(), &argv[1]));
- for (size_t idx = 0; idx < cache_.size(); idx++) {
- napi_value val;
- NAPI_STATUS_RETURN(Convert(env, cache_[idx], false, val));
- NAPI_STATUS_RETURN(napi_set_element(env, argv[1], idx, val));
- }
+ NAPI_STATUS_RETURN(updates_->Iterate(env, *batch_, &argv[1]));

  NAPI_STATUS_RETURN(napi_create_int64(env, updates_->sequence_, &argv[2]));

@@ -1029,110 +1213,9 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Wor
  Worker::Destroy(env);
  }

- std::optional<std::string> GetColumnName(uint32_t column_family_id) {
- if (column_family_id == 0) {
- return "default";
- }
- auto columns = database_->columns_;
- auto columnIt = std::find_if(columns.begin(), columns.end(),
- [&](const auto& handle) { return handle->GetID() == column_family_id; });
- return columnIt == columns.end() ? std::nullopt : std::optional<std::string>((*columnIt)->GetName());
- }
-
- rocksdb::Status PutCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
- if (updates_->column_ && updates_->column_->GetID() != column_family_id) {
- return rocksdb::Status::OK();
- }
-
- cache_.emplace_back("put");
-
- if (updates_->keys_) {
- cache_.emplace_back(key.ToStringView());
- } else {
- cache_.emplace_back(std::nullopt);
- }
-
- if (updates_->values_) {
- cache_.emplace_back(value.ToStringView());
- } else {
- cache_.emplace_back(std::nullopt);
- }
-
- if (!updates_->column_) {
- cache_.emplace_back(GetColumnName(column_family_id));
- } else {
- cache_.emplace_back(std::nullopt);
- }
-
- return rocksdb::Status::OK();
- }
-
- rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
- if (updates_->column_ && updates_->column_->GetID() != column_family_id) {
- return rocksdb::Status::OK();
- }
-
- cache_.emplace_back("del");
-
- if (updates_->keys_) {
- cache_.emplace_back(key.ToStringView());
- } else {
- cache_.emplace_back(std::nullopt);
- }
-
- cache_.emplace_back(std::nullopt);
-
- if (!updates_->column_) {
- cache_.emplace_back(GetColumnName(column_family_id));
- } else {
- cache_.emplace_back(std::nullopt);
- }
-
- return rocksdb::Status::OK();
- }
-
- rocksdb::Status MergeCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
- if (updates_->column_ && updates_->column_->GetID() != column_family_id) {
- return rocksdb::Status::OK();
- }
-
- cache_.emplace_back("merge");
-
- if (updates_->keys_) {
- cache_.emplace_back(key.ToStringView());
- } else {
- cache_.emplace_back(std::nullopt);
- }
-
- if (updates_->values_) {
- cache_.emplace_back(value.ToStringView());
- } else {
- cache_.emplace_back(std::nullopt);
- }
-
- if (!updates_->column_) {
- cache_.emplace_back(GetColumnName(column_family_id));
- } else {
- cache_.emplace_back(std::nullopt);
- }
-
- return rocksdb::Status::OK();
- }
-
- void LogData(const rocksdb::Slice& data) override {
- if (updates_->data_) {
- cache_.emplace_back("data");
- cache_.emplace_back(std::nullopt);
- cache_.emplace_back(data.ToStringView());
- cache_.emplace_back(std::nullopt);
- }
- }
-
- bool Continue() override { return true; }
-
  private:
  int64_t count_ = -1;
- std::vector<std::optional<std::string>> cache_;
+ std::unique_ptr<rocksdb::WriteBatch> batch_;
  Updates* updates_;
  };

@@ -1162,11 +1245,13 @@ NAPI_METHOD(updates_init) {
  NAPI_STATUS_THROWS(napi_get_named_property(env, argv[1], "data", &dataProperty));
  NAPI_STATUS_THROWS(napi_get_value_bool(env, dataProperty, &data));

- // TODO (fix): Needs to support { column: null }
+ const bool keyAsBuffer = EncodingIsBuffer(env, argv[1], "keyEncoding");
+ const bool valueAsBuffer = EncodingIsBuffer(env, argv[1], "valueEncoding");
+
  rocksdb::ColumnFamilyHandle* column;
- NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[1], &column, false));
+ NAPI_STATUS_THROWS(GetColumnFamily(nullptr, env, argv[1], &column));

- auto updates = std::make_unique<Updates>(database, since, keys, values, data, column);
+ auto updates = std::make_unique<Updates>(database, since, keys, values, data, column, keyAsBuffer, valueAsBuffer);

  napi_value result;
  NAPI_STATUS_THROWS(napi_create_external(env, updates.get(), Finalize<Updates>, updates.get(), &result));
@@ -1202,102 +1287,6 @@ NAPI_METHOD(updates_close) {
  return 0;
  }

- NAPI_METHOD(db_put) {
- NAPI_ARGV(4);
-
- Database* database;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
-
- rocksdb::PinnableSlice key;
- NAPI_STATUS_THROWS(ToString(env, argv[1], key));
-
- rocksdb::PinnableSlice val;
- NAPI_STATUS_THROWS(ToString(env, argv[2], val));
-
- rocksdb::ColumnFamilyHandle* column;
- NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[3], &column));
-
- rocksdb::WriteOptions writeOptions;
- ROCKS_STATUS_THROWS(database->db_->Put(writeOptions, column, key, val));
-
- return 0;
- }
-
- struct GetWorker final : public Worker {
- GetWorker(napi_env env,
- Database* database,
- rocksdb::ColumnFamilyHandle* column,
- napi_value callback,
- const std::string& key,
- const bool asBuffer,
- const bool fillCache)
- : Worker(env, database, callback, "rocks_level.db.get"),
- column_(column),
- key_(key),
- asBuffer_(asBuffer),
- fillCache_(fillCache),
- snapshot_(database_->db_->GetSnapshot(),
- [this](const rocksdb::Snapshot* ptr) { database_->db_->ReleaseSnapshot(ptr); }) {
- database_->IncrementPriorityWork(env);
- }
-
- rocksdb::Status Execute(Database& database) override {
- rocksdb::ReadOptions readOptions;
- readOptions.fill_cache = fillCache_;
- readOptions.snapshot = snapshot_.get();
-
- auto status = database.db_->Get(readOptions, column_, key_, &value_);
-
- key_.clear();
- snapshot_ = nullptr;
-
- return status;
- }
-
- napi_status OnOk(napi_env env, napi_value callback) override {
- napi_value argv[2];
- NAPI_STATUS_RETURN(napi_get_null(env, &argv[0]));
-
- NAPI_STATUS_RETURN(Convert(env, &value_, asBuffer_, argv[1]));
-
- return CallFunction(env, callback, 2, argv);
- }
-
- void Destroy(napi_env env) override {
- database_->DecrementPriorityWork(env);
- Worker::Destroy(env);
- }
-
- private:
- rocksdb::ColumnFamilyHandle* column_;
- std::string key_;
- rocksdb::PinnableSlice value_;
- const bool asBuffer_;
- const bool fillCache_;
- std::shared_ptr<const rocksdb::Snapshot> snapshot_;
- };
-
- NAPI_METHOD(db_get) {
- NAPI_ARGV(4);
-
- Database* database;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
-
- std::string key;
- NAPI_STATUS_THROWS(ToString(env, argv[1], key));
-
- const auto asBuffer = EncodingIsBuffer(env, argv[2], "valueEncoding");
- const auto fillCache = BooleanProperty(env, argv[2], "fillCache").value_or(true);
-
- rocksdb::ColumnFamilyHandle* column;
- NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[2], &column));
-
- auto worker = new GetWorker(env, database, column, argv[3], key, asBuffer, fillCache);
- worker->Queue(env);
-
- return 0;
- }
-
  struct GetManyWorker final : public Worker {
  GetManyWorker(napi_env env,
  Database* database,
@@ -1420,24 +1409,6 @@ NAPI_METHOD(db_get_many) {
  return 0;
  }

- NAPI_METHOD(db_del) {
- NAPI_ARGV(3);
-
- Database* database;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
-
- rocksdb::PinnableSlice key;
- NAPI_STATUS_THROWS(ToString(env, argv[1], key));
-
- rocksdb::ColumnFamilyHandle* column;
- NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[2], &column));
-
- rocksdb::WriteOptions writeOptions;
- ROCKS_STATUS_THROWS(database->db_->Delete(writeOptions, column, key));
-
- return 0;
- }
-
  NAPI_METHOD(db_clear) {
  NAPI_ARGV(2);

@@ -1806,11 +1777,6 @@ NAPI_METHOD(batch_do) {
  }

  NAPI_METHOD(batch_init) {
- NAPI_ARGV(1);
-
- Database* database;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
-
  auto batch = new rocksdb::WriteBatch();

  napi_value result;
@@ -1820,53 +1786,79 @@
  }

  NAPI_METHOD(batch_put) {
- NAPI_ARGV(5);
-
- Database* database;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+ NAPI_ARGV(4);

  rocksdb::WriteBatch* batch;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], (void**)(&batch)));
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], (void**)(&batch)));

  rocksdb::PinnableSlice key;
- NAPI_STATUS_THROWS(ToString(env, argv[2], key));
+ NAPI_STATUS_THROWS(ToString(env, argv[1], key));

  rocksdb::PinnableSlice val;
- NAPI_STATUS_THROWS(ToString(env, argv[3], val));
+ NAPI_STATUS_THROWS(ToString(env, argv[2], val));

  rocksdb::ColumnFamilyHandle* column;
- NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[4], &column));
+ NAPI_STATUS_THROWS(GetColumnFamily(nullptr, env, argv[3], &column));

- ROCKS_STATUS_THROWS(batch->Put(column, key, val));
+ if (column) {
+ ROCKS_STATUS_THROWS(batch->Put(column, key, val));
+ } else {
+ ROCKS_STATUS_THROWS(batch->Put(key, val));
+ }

  return 0;
  }

  NAPI_METHOD(batch_del) {
- NAPI_ARGV(4);
+ NAPI_ARGV(3);

- Database* database;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+ rocksdb::WriteBatch* batch;
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&batch)));
+
+ rocksdb::PinnableSlice key;
+ NAPI_STATUS_THROWS(ToString(env, argv[1], key));
+
+ rocksdb::ColumnFamilyHandle* column;
+ NAPI_STATUS_THROWS(GetColumnFamily(nullptr, env, argv[2], &column));
+
+ if (column) {
+ ROCKS_STATUS_THROWS(batch->Delete(column, key));
+ } else {
+ ROCKS_STATUS_THROWS(batch->Delete(key));
+ }
+
+ return 0;
+ }
+
+ NAPI_METHOD(batch_merge) {
+ NAPI_ARGV(4);

  rocksdb::WriteBatch* batch;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], (void**)(&batch)));

  rocksdb::PinnableSlice key;
- NAPI_STATUS_THROWS(ToString(env, argv[2], key));
+ NAPI_STATUS_THROWS(ToString(env, argv[1], key));
+
+ rocksdb::PinnableSlice val;
+ NAPI_STATUS_THROWS(ToString(env, argv[2], val));

  rocksdb::ColumnFamilyHandle* column;
- NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[3], &column));
+ NAPI_STATUS_THROWS(GetColumnFamily(nullptr, env, argv[3], &column));

- ROCKS_STATUS_THROWS(batch->Delete(column, key));
+ if (column) {
+ ROCKS_STATUS_THROWS(batch->Merge(column, key, val));
+ } else {
+ ROCKS_STATUS_THROWS(batch->Merge(key, val));
+ }

  return 0;
  }

  NAPI_METHOD(batch_clear) {
- NAPI_ARGV(2);
+ NAPI_ARGV(1);

  rocksdb::WriteBatch* batch;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&batch)));

  batch->Clear();

@@ -1889,53 +1881,65 @@ NAPI_METHOD(batch_write) {
  }

  NAPI_METHOD(batch_put_log_data) {
- NAPI_ARGV(4);
-
- Database* database;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+ NAPI_ARGV(3);

  rocksdb::WriteBatch* batch;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&batch)));

  rocksdb::PinnableSlice logData;
- NAPI_STATUS_THROWS(ToString(env, argv[2], logData));
+ NAPI_STATUS_THROWS(ToString(env, argv[1], logData));

  ROCKS_STATUS_THROWS(batch->PutLogData(logData));

  return 0;
  }

- NAPI_METHOD(batch_merge) {
- NAPI_ARGV(5);
+ NAPI_METHOD(batch_count) {
+ NAPI_ARGV(1);
+
+ rocksdb::WriteBatch* batch;
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&batch)));
+
+ napi_value result;
+ NAPI_STATUS_THROWS(napi_create_int64(env, batch->Count(), &result));
+
+ return result;
+ }
+
+ NAPI_METHOD(batch_iterate) {
+ NAPI_ARGV(3);

  Database* database;
  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));

  rocksdb::WriteBatch* batch;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], (void**)(&batch)));
-
- rocksdb::PinnableSlice key;
- NAPI_STATUS_THROWS(ToString(env, argv[2], key));
-
- rocksdb::PinnableSlice val;
- NAPI_STATUS_THROWS(ToString(env, argv[3], val));
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));

- rocksdb::ColumnFamilyHandle* column;
- NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[4], &column));
+ napi_value keysProperty;
+ bool keys;
+ NAPI_STATUS_THROWS(napi_get_named_property(env, argv[2], "keys", &keysProperty));
+ NAPI_STATUS_THROWS(napi_get_value_bool(env, keysProperty, &keys));

- ROCKS_STATUS_THROWS(batch->Merge(column, key, val));
+ napi_value valuesProperty;
+ bool values;
+ NAPI_STATUS_THROWS(napi_get_named_property(env, argv[2], "values", &valuesProperty));
+ NAPI_STATUS_THROWS(napi_get_value_bool(env, valuesProperty, &values));

- return 0;
- }
+ napi_value dataProperty;
+ bool data;
+ NAPI_STATUS_THROWS(napi_get_named_property(env, argv[2], "data", &dataProperty));
+ NAPI_STATUS_THROWS(napi_get_value_bool(env, dataProperty, &data));

- NAPI_METHOD(batch_count) {
- NAPI_ARGV(2);
+ const bool keyAsBuffer = EncodingIsBuffer(env, argv[1], "keyEncoding");
+ const bool valueAsBuffer = EncodingIsBuffer(env, argv[1], "valueEncoding");

- rocksdb::WriteBatch* batch;
- NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));
+ rocksdb::ColumnFamilyHandle* column;
+ NAPI_STATUS_THROWS(GetColumnFamily(nullptr, env, argv[2], &column));

  napi_value result;
- NAPI_STATUS_THROWS(napi_create_int64(env, batch->Count(), &result));
+ BatchIterator iterator(nullptr, keys, values, data, column, keyAsBuffer, valueAsBuffer);
+
+ NAPI_STATUS_THROWS(iterator.Iterate(env, *batch, &result));

  return result;
  }
@@ -2020,10 +2024,7 @@ NAPI_INIT() {
  NAPI_EXPORT_FUNCTION(db_init);
  NAPI_EXPORT_FUNCTION(db_open);
  NAPI_EXPORT_FUNCTION(db_close);
- NAPI_EXPORT_FUNCTION(db_put);
- NAPI_EXPORT_FUNCTION(db_get);
  NAPI_EXPORT_FUNCTION(db_get_many);
- NAPI_EXPORT_FUNCTION(db_del);
  NAPI_EXPORT_FUNCTION(db_clear);
  NAPI_EXPORT_FUNCTION(db_get_property);
  NAPI_EXPORT_FUNCTION(db_get_latest_sequence);
@@ -2051,4 +2052,5 @@ NAPI_INIT() {
  NAPI_EXPORT_FUNCTION(batch_put_log_data);
  NAPI_EXPORT_FUNCTION(batch_merge);
  NAPI_EXPORT_FUNCTION(batch_count);
+ NAPI_EXPORT_FUNCTION(batch_iterate);
  }
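The new `BatchIterator::Iterate` above (shared by `batch_iterate` and the updates path) flattens each batch entry into four consecutive array slots: op string, key, value, column — with the column slot currently always null per the TODO. A minimal JS-side sketch of decoding that layout; `decodeBatchRows` is a hypothetical helper, not part of the package, and `rows` stands for the array returned by `binding.batch_iterate`:

```js
// Hypothetical decoder for the flat [op, key, value, column] layout
// (4 slots per entry) produced by BatchIterator::Iterate.
function decodeBatchRows (rows) {
  const entries = []
  for (let n = 0; n < rows.length; n += 4) {
    entries.push({
      type: rows[n + 0], // 'put' | 'del' | 'merge' | 'data'
      key: rows[n + 1], // null unless keys were requested
      value: rows[n + 2], // null unless values were requested
      column: rows[n + 3] // currently always null (see TODO above)
    })
  }
  return entries
}
```

A flat array keeps the N-API surface to one `napi_create_array_with_length` plus `napi_set_element` calls per slot, avoiding per-entry object allocation in the binding.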
package/chained-batch.js CHANGED
@@ -2,55 +2,111 @@

  const { AbstractChainedBatch } = require('abstract-level')
  const binding = require('./binding')
+ const ModuleError = require('module-error')
+ const { fromCallback } = require('catering')

- const kDbContext = Symbol('db')
- const kBatchContext = Symbol('context')
+ const kWrite = Symbol('write')
+ const kBatchContext = Symbol('batchContext')
+ const kDbContext = Symbol('dbContext')
+ const kPromise = Symbol('promise')
+
+ const NOOP = () => {}
+ const EMPTY = {}

  class ChainedBatch extends AbstractChainedBatch {
- constructor (db, context) {
+ constructor (db, context, write) {
  super(db)

+ this[kWrite] = write
  this[kDbContext] = context
- this[kBatchContext] = binding.batch_init(this[kDbContext])
+ this[kBatchContext] = binding.batch_init()
  }

  _put (key, value, options) {
- binding.batch_put(this[kDbContext], this[kBatchContext], key, value, options)
+ if (key === null || key === undefined) {
+ throw new ModuleError('Key cannot be null or undefined', {
+ code: 'LEVEL_INVALID_KEY'
+ })
+ }
+
+ if (value === null || value === undefined) {
+ throw new ModuleError('value cannot be null or undefined', {
+ code: 'LEVEL_INVALID_VALUE'
+ })
+ }
+
+ binding.batch_put(this[kBatchContext], key, value, options ?? EMPTY)
  }

  _del (key, options) {
- binding.batch_del(this[kDbContext], this[kBatchContext], key, options)
+ if (key === null || key === undefined) {
+ throw new ModuleError('Key cannot be null or undefined', {
+ code: 'LEVEL_INVALID_KEY'
+ })
+ }
+
+ binding.batch_del(this[kBatchContext], key, options ?? EMPTY)
  }

  _clear () {
- binding.batch_clear(this[kDbContext], this[kBatchContext])
+ binding.batch_clear(this[kBatchContext])
  }

  _write (options, callback) {
- try {
- binding.batch_write(this[kDbContext], this[kBatchContext], options)
- process.nextTick(callback, null)
- } catch (err) {
- process.nextTick(callback, err)
- }
+ callback = fromCallback(callback, kPromise)
+
+ this[kWrite](this, this[kBatchContext], options ?? EMPTY, callback)
+
+ return callback[kPromise]
  }

  _close (callback) {
  process.nextTick(callback)
  }

- putLogData (data, options) {
- // TODO (fix): Check if open...
- binding.batch_put_log_data(this[kDbContext], this[kBatchContext], data, options)
+ get length () {
+ return binding.batch_count(this[kBatchContext])
  }

- merge (key, value, options = {}) {
- // TODO (fix): Check if open...
- binding.batch_merge(this[kDbContext], this[kBatchContext], key, value, options)
+ _putLogData (value, options) {
+ if (value === null || value === undefined) {
+ throw new ModuleError('value cannot be null or undefined', {
+ code: 'LEVEL_INVALID_VALUE'
+ })
+ }
+
+ binding.batch_put_log_data(this[kBatchContext], value, options ?? EMPTY)
  }

- get count () {
- return binding.batch_count(this[kDbContext], this[kBatchContext])
+ _merge (key, value, options) {
+ if (key === null || key === undefined) {
+ throw new ModuleError('Key cannot be null or undefined', {
+ code: 'LEVEL_INVALID_KEY'
+ })
+ }
+
+ if (value === null || value === undefined) {
+ throw new ModuleError('value cannot be null or undefined', {
+ code: 'LEVEL_INVALID_VALUE'
+ })
+ }
+
+ binding.batch_merge(this[kBatchContext], key, value, options ?? EMPTY)
+ }
+
+ * [Symbol.iterator] () {
+ const rows = binding.batch_iterate(this[kDbContext], this[kBatchContext], {
+ keys: true,
+ values: true,
+ data: true
+ })
+ for (let n = 0; n < rows.length; n += 4) {
+ yield {
+ type: rows[n + 0],
+ key: rows[n + 1],
+ value: rows[n + 2]
+ }
+ }
  }
  }

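With this rework the chained batch keeps all of its state in the native `rocksdb::WriteBatch`: `length` is answered by `binding.batch_count`, and iteration replays the batch through `binding.batch_iterate`. A rough usage sketch, assuming an opened RocksLevel instance `db` (`put`, `del`, and the promise-returning `write` come from abstract-level):

```js
const batch = db.batch() // ChainedBatch backed by a native WriteBatch

batch.put('a', '1')
batch.del('b')

console.log(batch.length) // 2, read back via binding.batch_count

// Symbol.iterator decodes the flat rows from binding.batch_iterate
for (const { type, key, value } of batch) {
  console.log(type, key, value) // e.g. 'put', 'a', '1'
}

await batch.write() // resolved through catering's fromCallback
```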
package/index.js CHANGED
@@ -1,5 +1,6 @@
  'use strict'

+ const { fromCallback } = require('catering')
  const { AbstractLevel } = require('abstract-level')
  const ModuleError = require('module-error')
  const fs = require('fs')
@@ -11,6 +12,9 @@ const os = require('os')
  const kContext = Symbol('context')
  const kColumns = Symbol('columns')
  const kLocation = Symbol('location')
+ const kPromise = Symbol('promise')
+
+ const EMPTY = {}

  class RocksLevel extends AbstractLevel {
  constructor (location, options, _) {
@@ -95,55 +99,101 @@ class RocksLevel extends AbstractLevel {
  }

  _put (key, value, options, callback) {
+ callback = fromCallback(callback, kPromise)
+
  try {
- binding.db_put(this[kContext], key, value, options)
- process.nextTick(callback, null)
+ const batch = this.batch()
+ batch.put(key, value, options ?? EMPTY)
+ batch.write(callback)
  } catch (err) {
  process.nextTick(callback, err)
  }
+
+ return callback[kPromise]
  }

  _get (key, options, callback) {
- binding.db_get(this[kContext], key, options, callback)
+ this._getMany([key], options ?? EMPTY, (err, val) => {
+ if (err) {
+ callback(err)
+ } else if (val[0] === undefined) {
+ callback(Object.assign(new Error('not found'), {
+ code: 'LEVEL_NOT_FOUND'
+ }))
+ } else {
+ callback(null, val[0])
+ }
+ })
  }

  _getMany (keys, options, callback) {
- binding.db_get_many(this[kContext], keys, options, callback)
+ callback = fromCallback(callback, kPromise)
+
+ try {
+ binding.db_get_many(this[kContext], keys, options ?? EMPTY, callback)
+ } catch (err) {
+ process.nextTick(callback, err)
+ }
+
+ return callback[kPromise]
  }

  _del (key, options, callback) {
+ callback = fromCallback(callback, kPromise)
+
  try {
- binding.db_del(this[kContext], key, options)
- process.nextTick(callback, null)
+ const batch = this.batch()
+ batch.del(key, options ?? EMPTY)
+ batch.write(callback)
  } catch (err) {
  process.nextTick(callback, err)
  }
+
+ return callback[kPromise]
  }

  _clear (options, callback) {
+ callback = fromCallback(callback, kPromise)
+
  try {
- binding.db_clear(this[kContext], options)
+ // TODO (fix): Use batch + DeleteRange...
+ binding.db_clear(this[kContext], options ?? EMPTY)
  process.nextTick(callback, null)
  } catch (err) {
  process.nextTick(callback, err)
  }
+
+ return callback[kPromise]
  }

  _chainedBatch () {
- return new ChainedBatch(this, this[kContext])
+ return new ChainedBatch(this, this[kContext], (batch, context, options, callback) => {
+ try {
+ const seq = this.sequence
+ binding.batch_write(this[kContext], context, options)
+ this.emit('write', batch, seq + 1)
+ process.nextTick(callback, null)
+ } catch (err) {
+ process.nextTick(callback, err)
+ }
+ })
  }

  _batch (operations, options, callback) {
+ callback = fromCallback(callback, kPromise)
+
  try {
- binding.batch_do(this[kContext], operations, options)
+ binding.batch_do(this[kContext], operations, options ?? EMPTY)
  process.nextTick(callback, null)
  } catch (err) {
  process.nextTick(callback, err)
  }
+
+ return callback[kPromise]
  }

  _iterator (options) {
- return new Iterator(this, this[kContext], options)
+ return new Iterator(this, this[kContext], options ?? EMPTY)
  }

  getProperty (property) {
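`_put` and `_del` above now funnel through a chained batch, and each method wraps its callback with catering's `fromCallback` so the same code path serves both callback and promise callers. A minimal sketch of that pattern; `putExample` is a hypothetical stand-in for the `_put` implementation above, not part of the package:

```js
const { fromCallback } = require('catering')
const kPromise = Symbol('promise')

// Hypothetical helper mirroring _put above.
function putExample (db, key, value, callback) {
  // With no callback given, fromCallback creates one and attaches the
  // matching promise at callback[kPromise].
  callback = fromCallback(callback, kPromise)

  try {
    const batch = db.batch() // routed through ChainedBatch
    batch.put(key, value)
    batch.write(callback) // settles the attached promise, if any
  } catch (err) {
    process.nextTick(callback, err)
  }

  return callback[kPromise] // undefined when a real callback was passed
}

// Both styles work:
// putExample(db, 'a', '1', (err) => { /* callback style */ })
// await putExample(db, 'a', '1') // promise style
```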
@@ -9,7 +9,7 @@ int compareRev(const rocksdb::Slice& a, const rocksdb::Slice& b) {
  // Compare the revision number
  auto result = 0;
  const auto end = std::min(endA, endB);
- while (indexA < end) {
+ while (indexA < end && indexB < end) {
  const auto ac = a[indexA++];
  const auto bc = b[indexB++];

@@ -43,8 +43,6 @@ int compareRev(const rocksdb::Slice& a, const rocksdb::Slice& b) {
  return endA - endB;
  }

- // Merge operator that picks the maximum operand, Comparison is based on
- // Slice::compare
  class MaxRevOperator : public rocksdb::MergeOperator {
  public:
  bool FullMergeV2(const MergeOperationInput& merge_in,
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@nxtedition/rocksdb",
- "version": "7.0.43",
+ "version": "7.0.46",
  "description": "A low-level Node.js RocksDB binding",
  "license": "MIT",
  "main": "index.js",
@@ -12,7 +12,8 @@
  },
  "dependencies": {
  "abstract-level": "^1.0.2",
- "module-error": "^1.0.1",
+ "catering": "^2.1.1",
+ "module-error": "^1.0.2",
  "napi-macros": "~2.0.0",
  "node-gyp-build": "^4.3.0"
  },