@nxtedition/rocksdb 10.3.8 → 11.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/binding.cc +97 -346
- package/chained-batch.js +8 -8
- package/index.js +28 -66
- package/iterator.js +42 -64
- package/package.json +1 -1
- package/prebuilds/darwin-arm64/@nxtedition+rocksdb.node +0 -0
- package/prebuilds/linux-x64/@nxtedition+rocksdb.node +0 -0
- package/util.h +0 -17
package/binding.cc
CHANGED
@@ -284,15 +284,11 @@ struct BaseIterator : public Closable {
                const std::optional<std::string>& gte,
                const int limit,
                const bool fillCache,
-               std::shared_ptr<const rocksdb::Snapshot> snapshot,
                bool tailing = false)
       : database_(database),
         column_(column),
-        snapshot_(snapshot),
         reverse_(reverse),
-        limit_(limit)
-        fillCache_(fillCache),
-        tailing_(tailing) {
+        limit_(limit) {
     if (lte) {
       upper_bound_ = rocksdb::PinnableSlice();
       *upper_bound_->GetSelf() = std::move(*lte) + '\0';
@@ -312,30 +308,33 @@ struct BaseIterator : public Closable {
       *lower_bound_->GetSelf() = std::move(*gt) + '\0';
       lower_bound_->PinSelf();
     }
-    database_->Attach(this);
-  }
-
-  virtual ~BaseIterator() { assert(!iterator_); }
-
-  bool DidSeek() const { return iterator_ != nullptr; }
 
-
-    if (
-
+    rocksdb::ReadOptions readOptions;
+    if (upper_bound_) {
+      readOptions.iterate_upper_bound = &*upper_bound_;
+    }
+    if (lower_bound_) {
+      readOptions.iterate_lower_bound = &*lower_bound_;
     }
+    readOptions.fill_cache = fillCache;
+    readOptions.async_io = true;
+    readOptions.adaptive_readahead = true;
+    readOptions.tailing = tailing;
+
+    iterator_.reset(database_->db->NewIterator(readOptions, column_));
 
     if (reverse_) {
       iterator_->SeekToLast();
     } else {
       iterator_->SeekToFirst();
     }
+
+    database_->Attach(this);
   }
 
-  virtual
-    if (!iterator_) {
-      Init();
-    }
+  virtual ~BaseIterator() { assert(!iterator_); }
 
+  virtual void Seek(const rocksdb::Slice& target) {
     if ((upper_bound_ && target.compare(*upper_bound_) >= 0) || (lower_bound_ && target.compare(*lower_bound_) < 0)) {
       // TODO (fix): Why is this required? Seek should handle it?
       // https://github.com/facebook/rocksdb/issues/9904
@@ -351,7 +350,6 @@ struct BaseIterator : public Closable {
   }
 
   virtual rocksdb::Status Close() override {
-    snapshot_.reset();
     iterator_.reset();
     database_->Detach(this);
     return rocksdb::Status::OK();
@@ -393,25 +391,8 @@ struct BaseIterator : public Closable {
 
   Database* database_;
   rocksdb::ColumnFamilyHandle* column_;
-  std::shared_ptr<const rocksdb::Snapshot> snapshot_;
 
  private:
-  void Init() {
-    rocksdb::ReadOptions readOptions;
-    if (upper_bound_) {
-      readOptions.iterate_upper_bound = &*upper_bound_;
-    }
-    if (lower_bound_) {
-      readOptions.iterate_lower_bound = &*lower_bound_;
-    }
-    readOptions.fill_cache = fillCache_;
-    readOptions.snapshot = snapshot_.get();
-    readOptions.async_io = true;
-    readOptions.adaptive_readahead = true;
-    readOptions.tailing = tailing_;
-
-    iterator_.reset(database_->db->NewIterator(readOptions, column_));
-  }
 
   int count_ = 0;
   std::optional<rocksdb::PinnableSlice> lower_bound_;
@@ -419,8 +400,6 @@ struct BaseIterator : public Closable {
   std::unique_ptr<rocksdb::Iterator> iterator_;
   const bool reverse_;
   const int limit_;
-  const bool fillCache_;
-  const bool tailing_;
 };
 
 struct Iterator final : public BaseIterator {
@@ -436,12 +415,15 @@ struct Iterator final : public BaseIterator {
            const std::optional<std::string>& gte,
            const bool fillCache,
            const size_t highWaterMarkBytes,
-
-
-
+           bool tailing = false,
+           Encoding keyEncoding = Encoding::Invalid,
+           Encoding valueEncoding = Encoding::Invalid)
+      : BaseIterator(database, column, reverse, lt, lte, gt, gte, limit, fillCache, tailing),
         keys_(keys),
         values_(values),
-        highWaterMarkBytes_(highWaterMarkBytes)
+        highWaterMarkBytes_(highWaterMarkBytes),
+        keyEncoding_(keyEncoding),
+        valueEncoding_(valueEncoding) {}
 
   void Seek(const rocksdb::Slice& target) override {
     first_ = true;
@@ -452,6 +434,8 @@ struct Iterator final : public BaseIterator {
   const bool values_;
   const size_t highWaterMarkBytes_;
   bool first_ = true;
+  const Encoding keyEncoding_;
+  const Encoding valueEncoding_;
 };
 
 /**
@@ -698,64 +682,6 @@ napi_status InitOptions(napi_env env, T& columnOptions, const U& options) {
   return napi_ok;
 }
 
-NAPI_METHOD(db_get_merge_operands) {
-  NAPI_ARGV(4);
-
-  Database* database;
-  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
-
-  std::string key;
-  NAPI_STATUS_THROWS(GetValue(env, argv[1], key));
-
-  const auto options = argv[2];
-
-  Encoding valueEncoding = Encoding::String;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "valueEncoding", valueEncoding));
-
-  rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
-  NAPI_STATUS_THROWS(GetProperty(env, options, "column", column));
-
-  auto callback = argv[3];
-
-  runAsync<std::vector<rocksdb::PinnableSlice>>(
-      "leveldown.get.mergeOperands", env, callback,
-      [=, key = std::move(key)](auto& values) {
-        rocksdb::ReadOptions readOptions;
-
-        values.resize(16); // TODO (fix): Make option
-
-        rocksdb::GetMergeOperandsOptions mergeOperandsOptions;
-        mergeOperandsOptions.expected_max_number_of_operands = values.size();
-
-        int size = 0;
-        const auto status =
-            database->db->GetMergeOperands(readOptions, column, key, values.data(), &mergeOperandsOptions, &size);
-
-        values.resize(size);
-
-        return status;
-      },
-      [=](auto& values, auto env, auto& argv) {
-        argv.resize(2);
-
-        NAPI_STATUS_RETURN(napi_create_array_with_length(env, values.size(), &argv[1]));
-
-        for (size_t idx = 0; idx < values.size(); idx++) {
-          napi_value element;
-          if (values[idx].GetSelf()) {
-            NAPI_STATUS_RETURN(Convert(env, &values[idx], valueEncoding, element));
-          } else {
-            NAPI_STATUS_RETURN(napi_get_undefined(env, &element));
-          }
-          NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<uint32_t>(idx), element));
-        }
-
-        return napi_ok;
-      });
-
-  return 0;
-}
-
 NAPI_METHOD(db_get_identity) {
   NAPI_ARGV(1);
 
@@ -946,117 +872,6 @@ NAPI_METHOD(db_close) {
 }
 
 NAPI_METHOD(db_get_many) {
-  NAPI_ARGV(4);
-
-  Database* database;
-  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
-
-  uint32_t count;
-  NAPI_STATUS_THROWS(napi_get_array_length(env, argv[1], &count));
-
-  const auto options = argv[2];
-
-  bool fillCache = true;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "fillCache", fillCache));
-
-  rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
-  NAPI_STATUS_THROWS(GetProperty(env, options, "column", column));
-
-  bool takeSnapshot = true;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "snapshot", takeSnapshot));
-
-  auto callback = argv[3];
-
-  std::shared_ptr<const rocksdb::Snapshot> snapshot;
-  if (takeSnapshot) {
-    snapshot.reset(database->db->GetSnapshot(), [=](const auto ptr) { database->db->ReleaseSnapshot(ptr); });
-  }
-
-  std::vector<rocksdb::PinnableSlice> keys{count};
-
-  for (uint32_t n = 0; n < count; n++) {
-    napi_value element;
-    NAPI_STATUS_THROWS(napi_get_element(env, argv[1], n, &element));
-    NAPI_STATUS_THROWS(GetValue(env, element, keys[n]));
-  }
-
-  struct State {
-    std::vector<uint8_t> data;
-    std::vector<int32_t> sizes;
-  };
-
-  runAsync<State>(
-      "leveldown.get.many", env, callback,
-      [=, keys = std::move(keys), snapshot = std::move(snapshot)](auto& state) {
-        rocksdb::ReadOptions readOptions;
-        readOptions.fill_cache = fillCache;
-        readOptions.snapshot = snapshot.get();
-        readOptions.async_io = true;
-        readOptions.optimize_multiget_for_io = true;
-
-        std::vector<rocksdb::Status> statuses{count};
-        std::vector<rocksdb::PinnableSlice> values{count};
-        std::vector<rocksdb::Slice> keys2{count};
-
-        for (auto n = 0; n < count; n++) {
-          keys2[n] = keys[n];
-        }
-
-        database->db->MultiGet(readOptions, column, count, keys2.data(), values.data(), statuses.data());
-
-        auto size = 0;
-        for (auto n = 0; n < count; n++) {
-          const auto valueSize = values[n].size();
-          size += valueSize & 0x7 ? (valueSize | 0x7) + 1 : valueSize;
-        }
-
-        state.data.reserve(size);
-
-        auto push = [&](rocksdb::Slice* slice){
-          if (slice) {
-            state.sizes.push_back(static_cast<int32_t>(slice->size()));
-            std::copy_n(slice->data(), slice->size(), std::back_inserter(state.data));
-
-            if (state.data.size() & 0x7) {
-              state.data.resize((state.data.size() | 0x7) + 1);
-            }
-          } else {
-            state.sizes.push_back(-1);
-          }
-        };
-
-        for (auto n = 0; n < count; n++) {
-          push(statuses[n].ok() ? &values[n] : nullptr);
-        }
-
-        return rocksdb::Status::OK();
-      },
-      [=](auto& state, auto env, auto& argv) {
-        argv.resize(3);
-
-        if (state.sizes.size() > 0) {
-          auto sizes = std::make_unique<std::vector<int32_t>>(std::move(state.sizes));
-          NAPI_STATUS_RETURN(napi_create_external_buffer(env, sizes->size() * 4, sizes->data(), Finalize<std::vector<int32_t>>, sizes.get(), &argv[1]));
-          sizes.release();
-        } else {
-          NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[1]));
-        }
-
-        if (state.data.size() > 0) {
-          auto data = std::make_unique<std::vector<uint8_t>>(std::move(state.data));
-          NAPI_STATUS_RETURN(napi_create_external_buffer(env, data->size(), data->data(), Finalize<std::vector<uint8_t>>, data.get(), &argv[2]));
-          data.release();
-        } else {
-          NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[2]));
-        }
-
-        return napi_ok;
-      });
-
-  return 0;
-}
-
-NAPI_METHOD(db_get_many_sync) {
   NAPI_ARGV(3);
 
   Database* database;
@@ -1071,7 +886,7 @@ NAPI_METHOD(db_get_many_sync) {
   rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
   NAPI_STATUS_THROWS(GetProperty(env, argv[2], "column", column));
 
-  Encoding valueEncoding = Encoding::
+  Encoding valueEncoding = Encoding::Buffer;
   NAPI_STATUS_THROWS(GetProperty(env, argv[2], "valueEncoding", valueEncoding));
 
   std::vector<rocksdb::Slice> keys{count};
@@ -1094,19 +909,25 @@ NAPI_METHOD(db_get_many_sync) {
   napi_value rows;
   NAPI_STATUS_THROWS(napi_create_array_with_length(env, count, &rows));
 
+  napi_value finished;
+  NAPI_STATUS_THROWS(napi_get_boolean(env, true, &finished));
+
   for (auto n = 0; n < count; n++) {
     napi_value row;
-    if (statuses[n].
-      NAPI_STATUS_THROWS(Convert(env, &values[n], valueEncoding, row));
-    } else if (statuses[n].IsNotFound()) {
+    if (statuses[n].IsNotFound()) {
       NAPI_STATUS_THROWS(napi_get_undefined(env, &row));
     } else {
       ROCKS_STATUS_THROWS_NAPI(statuses[n]);
+      NAPI_STATUS_THROWS(Convert(env, &values[n], valueEncoding, row));
     }
     NAPI_STATUS_THROWS(napi_set_element(env, rows, n, row));
   }
 
-
+  napi_value ret;
+  NAPI_STATUS_THROWS(napi_create_object(env, &ret));
+  NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "rows", rows));
+  NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "finished", finished));
+  return ret;
 }
 
 NAPI_METHOD(db_clear) {
@@ -1169,11 +990,7 @@ NAPI_METHOD(db_clear) {
   // TODO (fix): Error handling.
   // TODO (fix): This should be async...
 
-
-      [=](const auto ptr) { database->db->ReleaseSnapshot(ptr); });
-  BaseIterator it(database, column, reverse, lt, lte, gt, gte, limit, false, snapshot);
-
-  it.SeekToRange();
+  BaseIterator it(database, column, reverse, lt, lte, gt, gte, limit, false);
 
   rocksdb::WriteBatch batch;
   rocksdb::WriteOptions writeOptions;
@@ -1288,17 +1105,15 @@ NAPI_METHOD(iterator_init) {
   rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
   NAPI_STATUS_THROWS(GetProperty(env, options, "column", column));
 
-
-  NAPI_STATUS_THROWS(GetProperty(env, options, "
+  Encoding keyEncoding;
+  NAPI_STATUS_THROWS(GetProperty(env, options, "keyEncoding", keyEncoding));
 
-
-
-    snapshot.reset(database->db->GetSnapshot(), [=](const auto ptr) { database->db->ReleaseSnapshot(ptr); });
-  }
+  Encoding valueEncoding;
+  NAPI_STATUS_THROWS(GetProperty(env, options, "valueEncoding", valueEncoding));
 
   auto iterator = std::unique_ptr<Iterator>(new Iterator(database, column, reverse, keys, values, limit, lt, lte, gt,
                                                          gte, fillCache, highWaterMarkBytes,
-
+                                                         tailing, keyEncoding, valueEncoding));
 
   napi_value result;
   NAPI_STATUS_THROWS(napi_create_external(env, iterator.get(), Finalize<Iterator>, iterator.get(), &result));
@@ -1333,7 +1148,7 @@ NAPI_METHOD(iterator_close) {
 }
 
 NAPI_METHOD(iterator_nextv) {
-  NAPI_ARGV(
+  NAPI_ARGV(2);
 
   Iterator* iterator;
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&iterator)));
@@ -1341,102 +1156,71 @@ NAPI_METHOD(iterator_nextv) {
   uint32_t count;
   NAPI_STATUS_THROWS(napi_get_value_uint32(env, argv[1], &count));
 
-
-
-  struct State {
-    std::vector<uint8_t> data;
-    std::vector<int32_t> sizes;
-    bool finished = false;
-  };
-
-  runAsync<State>(
-      std::string("leveldown.iterator.next"), env, callback,
-      [=](auto& state) {
-        if (!iterator->DidSeek()) {
-          iterator->SeekToRange();
-        }
-
-        state.sizes.reserve(count * 2);
-        state.data.reserve(iterator->highWaterMarkBytes_);
+  napi_value finished;
+  NAPI_STATUS_THROWS(napi_get_boolean(env, false, &finished));
 
-
-
-        auto push = [&](const std::optional<rocksdb::Slice>& slice){
-          if (slice) {
-            state.sizes.push_back(static_cast<int32_t>(slice->size()));
-            std::copy_n(slice->data(), slice->size(), std::back_inserter(state.data));
-
-            if (state.data.size() & 0x7) {
-              state.data.resize((state.data.size() | 0x7) + 1);
-            }
-
-            bytesRead += slice->size();
-          } else {
-            state.sizes.push_back(-1);
-          }
-        };
-
-        while (true) {
-          if (!iterator->first_) {
-            iterator->Next();
-          } else {
-            iterator->first_ = false;
-          }
-
-          if (!iterator->Valid() || !iterator->Increment()) {
-            state.finished = true;
-            return iterator->Status();
-          }
+  napi_value rows;
+  NAPI_STATUS_THROWS(napi_create_array(env, &rows));
 
-
-
-
-
-
-
-
-
-          push(iterator->CurrentValue());
-        }
+  size_t idx = 0;
+  size_t bytesRead = 0;
+  while (true) {
+    if (!iterator->first_) {
+      iterator->Next();
+    } else {
+      iterator->first_ = false;
+    }
 
-
-
-
-
-
-      },
-      [=](auto& state, auto env, auto& argv) {
-        argv.resize(4);
-
-        if (state.sizes.size() > 0) {
-          auto sizes = std::make_unique<std::vector<int32_t>>(std::move(state.sizes));
-          NAPI_STATUS_RETURN(napi_create_external_buffer(env, sizes->size() * 4, sizes->data(), Finalize<std::vector<int32_t>>, sizes.get(), &argv[1]));
-          sizes.release();
-        } else {
-          NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[1]));
-        }
+    if (!iterator->Valid() || !iterator->Increment()) {
+      ROCKS_STATUS_THROWS_NAPI(iterator->Status());
+      NAPI_STATUS_THROWS(napi_get_boolean(env, true, &finished));
+      break;
+    }
 
-
-
-
-
-
-
-
+    napi_value key;
+    napi_value val;
+
+    if (iterator->keys_ && iterator->values_) {
+      const auto k = iterator->CurrentKey();
+      const auto v = iterator->CurrentValue();
+      NAPI_STATUS_THROWS(Convert(env, &k, iterator->keyEncoding_, key));
+      NAPI_STATUS_THROWS(Convert(env, &v, iterator->valueEncoding_, val));
+      bytesRead += k.size() + v.size();
+    } else if (iterator->keys_) {
+      const auto k = iterator->CurrentKey();
+      NAPI_STATUS_THROWS(Convert(env, &k, iterator->keyEncoding_, key));
+      NAPI_STATUS_THROWS(napi_get_undefined(env, &val));
+      bytesRead += k.size();
+    } else if (iterator->values_) {
+      const auto v = iterator->CurrentValue();
+      NAPI_STATUS_THROWS(napi_get_undefined(env, &key));
+      NAPI_STATUS_THROWS(Convert(env, &v, iterator->valueEncoding_, val));
+      bytesRead += v.size();
+    } else {
+      assert(false);
+    }
 
-
+    NAPI_STATUS_THROWS(napi_set_element(env, rows, idx++, key));
+    NAPI_STATUS_THROWS(napi_set_element(env, rows, idx++, val));
 
-
-
+    if (bytesRead > iterator->highWaterMarkBytes_ || idx / 2 >= count) {
+      break;
+    }
+  }
 
-
+  napi_value ret;
+  NAPI_STATUS_THROWS(napi_create_object(env, &ret));
+  NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "rows", rows));
+  NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "finished", finished));
+  return ret;
 }
 
 NAPI_METHOD(batch_init) {
-  auto batch =
+  auto batch = std::make_unique<rocksdb::WriteBatch>();
 
   napi_value result;
-  NAPI_STATUS_THROWS(napi_create_external(env, batch, Finalize<rocksdb::WriteBatch>, batch, &result));
+  NAPI_STATUS_THROWS(napi_create_external(env, batch.get(), Finalize<rocksdb::WriteBatch>, batch.get(), &result));
+  batch.release();
 
   return result;
 }
@@ -1545,36 +1329,6 @@ NAPI_METHOD(batch_write) {
   bool lowPriority = false;
   NAPI_STATUS_THROWS(GetProperty(env, options, "lowPriority", lowPriority));
 
-  runAsync<int64_t>(
-      "leveldown.batch.write", env, callback,
-      [=](int64_t& seq) {
-        rocksdb::WriteOptions writeOptions;
-        writeOptions.sync = sync;
-        writeOptions.low_pri = lowPriority;
-        return database->db->Write(writeOptions, batch);
-      },
-      [=](int64_t& seq, auto env, auto& argv) { return napi_ok; });
-
-  return 0;
-}
-
-NAPI_METHOD(batch_write_sync) {
-  NAPI_ARGV(3);
-
-  Database* database;
-  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
-
-  rocksdb::WriteBatch* batch;
-  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));
-
-  auto options = argv[2];
-
-  bool sync = false;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "sync", sync));
-
-  bool lowPriority = false;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "lowPriority", lowPriority));
-
   rocksdb::WriteOptions writeOptions;
   writeOptions.sync = sync;
   writeOptions.low_pri = lowPriority;
@@ -1640,11 +1394,9 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(db_get_location);
   NAPI_EXPORT_FUNCTION(db_close);
   NAPI_EXPORT_FUNCTION(db_get_many);
-  NAPI_EXPORT_FUNCTION(db_get_many_sync);
   NAPI_EXPORT_FUNCTION(db_clear);
   NAPI_EXPORT_FUNCTION(db_get_property);
  NAPI_EXPORT_FUNCTION(db_get_latest_sequence);
-  NAPI_EXPORT_FUNCTION(db_get_merge_operands);
 
   NAPI_EXPORT_FUNCTION(iterator_init);
   NAPI_EXPORT_FUNCTION(iterator_seek);
@@ -1656,7 +1408,6 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(batch_del);
   NAPI_EXPORT_FUNCTION(batch_clear);
   NAPI_EXPORT_FUNCTION(batch_write);
-  NAPI_EXPORT_FUNCTION(batch_write_sync);
   NAPI_EXPORT_FUNCTION(batch_merge);
   NAPI_EXPORT_FUNCTION(batch_count);
   NAPI_EXPORT_FUNCTION(batch_iterate);
package/chained-batch.js
CHANGED
@@ -5,7 +5,6 @@ const binding = require('./binding')
 const ModuleError = require('module-error')
 const { fromCallback } = require('catering')
 
-const kWrite = Symbol('write')
 const kBatchContext = Symbol('batchContext')
 const kDbContext = Symbol('dbContext')
 const kPromise = Symbol('promise')
@@ -13,10 +12,9 @@ const kPromise = Symbol('promise')
 const EMPTY = {}
 
 class ChainedBatch extends AbstractChainedBatch {
-  constructor (db, context
+  constructor (db, context) {
     super(db)
 
-    this[kWrite] = write
     this[kDbContext] = context
     this[kBatchContext] = binding.batch_init()
   }
@@ -59,16 +57,18 @@ class ChainedBatch extends AbstractChainedBatch {
   _write (options, callback) {
     callback = fromCallback(callback, kPromise)
 
-
-
-      callback
-    })
+    try {
+      this._writeSync(options)
+      process.nextTick(callback, null)
+    } catch (err) {
+      process.nextTick(callback, err)
+    }
 
     return callback[kPromise]
   }
 
   _writeSync (options) {
-    binding.
+    binding.batch_write(this[kDbContext], this[kBatchContext], options ?? EMPTY)
   }
 
   _close (callback) {
package/index.js
CHANGED
@@ -8,7 +8,6 @@ const { ChainedBatch } = require('./chained-batch')
 const { Iterator } = require('./iterator')
 const fs = require('node:fs')
 const assert = require('node:assert')
-const { handleNextv, handleMany } = require('./util')
 
 const kContext = Symbol('context')
 const kColumns = Symbol('columns')
@@ -18,7 +17,7 @@ const kUnref = Symbol('unref')
 const kRefs = Symbol('refs')
 const kPendingClose = Symbol('pendingClose')
 
-const
+const kEmpty = {}
 
 class RocksLevel extends AbstractLevel {
   constructor (locationOrHandle, options) {
@@ -125,7 +124,7 @@ class RocksLevel extends AbstractLevel {
 
     try {
       const batch = this.batch()
-      batch.put(key, value, options ??
+      batch.put(key, value, options ?? kEmpty)
       batch.write(callback)
     } catch (err) {
       process.nextTick(callback, err)
@@ -137,7 +136,7 @@ class RocksLevel extends AbstractLevel {
   _get (key, options, callback) {
     callback = fromCallback(callback, kPromise)
 
-    this._getMany([key], options ??
+    this._getMany([key], options ?? kEmpty, (err, val) => {
       if (err) {
         callback(err)
       } else if (val[0] === undefined) {
@@ -152,52 +151,27 @@ class RocksLevel extends AbstractLevel {
     return callback[kPromise]
   }
 
-  _getManySync (keys, options) {
-    return binding.db_get_many_sync(this[kContext], keys, options ?? EMPTY)
-  }
-
   _getMany (keys, options, callback) {
     callback = fromCallback(callback, kPromise)
 
     try {
-
-
-        if (err) {
-          callback(err)
-        } else {
-          let rows
-          try {
-            rows = handleMany(sizes, data, options ?? EMPTY)
-          } catch (err) {
-            callback(err)
-          }
-          callback(null, rows)
-        }
-        this[kUnref]()
-      })
+      // TODO (fix): highWaterMark and limit with async between...
+      process.nextTick(callback, null, this._getManySync(keys, options ?? kEmpty))
     } catch (err) {
       process.nextTick(callback, err)
-      this[kUnref]()
     }
 
     return callback[kPromise]
   }
 
-
-
-
-    try {
-      this[kRef]()
-      binding.db_get_merge_operands(this[kContext], key, options ?? EMPTY, (err, val) => {
-        callback(err, val)
-        this[kUnref]()
-      })
-    } catch (err) {
-      process.nextTick(callback, err)
-      this[kUnref]()
+  _getManySync (keys, options) {
+    if (keys.some(key => typeof key === 'string')) {
+      keys = keys.map(key => typeof key === 'string' ? Buffer.from(key) : key)
     }
 
-
+    const { rows, finished } = binding.db_get_many(this[kContext], keys, options ?? kEmpty)
+    assert(finished)
+    return rows
   }
 
   _del (key, options, callback) {
@@ -205,7 +179,7 @@ class RocksLevel extends AbstractLevel {
 
     try {
       const batch = this.batch()
-      batch.del(key, options ??
+      batch.del(key, options ?? kEmpty)
       batch.write(callback)
     } catch (err) {
       process.nextTick(callback, err)
@@ -219,7 +193,7 @@ class RocksLevel extends AbstractLevel {
 
     try {
       // TODO (fix): Use batch + DeleteRange...
-      binding.db_clear(this[kContext], options ??
+      binding.db_clear(this[kContext], options ?? kEmpty)
       process.nextTick(callback, null)
     } catch (err) {
       process.nextTick(callback, err)
@@ -229,17 +203,7 @@ class RocksLevel extends AbstractLevel {
   }
 
   _chainedBatch () {
-    return new ChainedBatch(this, this[kContext]
-      try {
-        this[kRef]()
-        binding.batch_write(this[kContext], context, options, (err) => {
-          this[kUnref]()
-          callback(err)
-        })
-      } catch (err) {
-        process.nextTick(callback, err)
-      }
-    })
+    return new ChainedBatch(this, this[kContext])
   }
 
   _batch (operations, options, callback) {
@@ -261,7 +225,7 @@ class RocksLevel extends AbstractLevel {
   }
 
   _iterator (options) {
-    return new Iterator(this, this[kContext], options ??
+    return new Iterator(this, this[kContext], options ?? kEmpty)
   }
 
   get identity () {
@@ -290,24 +254,22 @@ class RocksLevel extends AbstractLevel {
     })
   }
 
-
+    // TOOD (perf): Merge into single call...
+    const context = binding.iterator_init(this[kContext], options ?? kEmpty)
     try {
-
-
-
-
-
-
-
-
-
-
-
-      })
-    }))
+      return binding.iterator_nextv(context, options.limit)
+    } finally {
+      binding.iterator_close(context)
+    }
+  }
+
+  querySync (options) {
+    // TOOD (perf): Merge into single call...
+    const context = binding.iterator_init(this[kContext], options ?? kEmpty)
+    try {
+      return binding.iterator_nextv(context, options.limit)
     } finally {
       binding.iterator_close(context)
-      this[kUnref]()
     }
   }
 }
package/iterator.js
CHANGED
@@ -2,7 +2,6 @@
 
 const { fromCallback } = require('catering')
 const { AbstractIterator } = require('abstract-level')
-const { handleNextv } = require('./util')
 
 const binding = require('./binding')
 
@@ -12,29 +11,16 @@ const kCache = Symbol('cache')
 const kFinished = Symbol('finished')
 const kFirst = Symbol('first')
 const kPosition = Symbol('position')
-const
-const kHandleNextv = Symbol('handleNextv')
-const kCallback = Symbol('callback')
-const kOptions = Symbol('options')
-const empty = []
-
-const registry = new FinalizationRegistry((context) => {
-  binding.iterator_close(context)
-})
+const kEmpty = []
 
 class Iterator extends AbstractIterator {
   constructor (db, context, options) {
     super(db, options)
 
     this[kContext] = binding.iterator_init(context, options)
-    registry.register(this, this[kContext], this[kContext])
 
-    this[kOptions] = { ...options }
-    this[kHandleNext] = this[kHandleNext].bind(this)
-    this[kHandleNextv] = this[kHandleNextv].bind(this)
-    this[kCallback] = null
     this[kFirst] = true
-    this[kCache] =
+    this[kCache] = kEmpty
     this[kFinished] = false
     this[kPosition] = 0
   }
@@ -45,7 +31,7 @@ class Iterator extends AbstractIterator {
   }
 
     this[kFirst] = true
-    this[kCache] =
+    this[kCache] = kEmpty
     this[kFinished] = false
     this[kPosition] = 0
 
@@ -60,35 +46,22 @@ class Iterator extends AbstractIterator {
     } else if (this[kFinished]) {
       process.nextTick(callback)
     } else {
-      this[
-
-      if (this[kFirst]) {
-        // It's common to only want one entry initially or after a seek()
-        this[kFirst] = false
-        binding.iterator_nextv(this[kContext], 1, this[kHandleNext])
-      } else {
-        // Limit the size of the cache to prevent starving the event loop
-        // while we're recursively calling process.nextTick().
-        binding.iterator_nextv(this[kContext], 1000, this[kHandleNext])
-      }
-    }
+      const size = this[kFirst] ? 1 : 1000
+      this[kFirst] = false
 
-
-
+      try {
+        const { rows, finished } = binding.iterator_nextv(this[kContext], size)
+        this[kCache] = rows
+        this[kFinished] = finished
+        this[kPosition] = 0
 
-
-
-
-      if (err) {
-        return callback(err)
+        setImmediate(() => this._next(callback))
+      } catch (err) {
+        process.nextTick(callback, err)
       }
+    }
 
-
-      this[kFinished] = finished
-      this[kPosition] = 0
-
-      this._next(callback)
-    })
+    return this
   }
 
   _nextv (size, options, callback) {
@@ -97,38 +70,43 @@ class Iterator extends AbstractIterator {
     if (this[kFinished]) {
       process.nextTick(callback, null, [])
     } else {
-      this[kCallback] = callback
       this[kFirst] = false
-      binding.iterator_nextv(this[kContext], size, this[kHandleNextv])
-    }
 
-
-
+      setImmediate(() => {
+        try {
+          const { rows, finished } = binding.iterator_nextv(this[kContext], size)
 
-
-
-
-
-      return callback(err)
-    }
+          const entries = []
+          for (let n = 0; n < rows.length; n += 2) {
+            entries.push([rows[n + 0], rows[n + 1]])
+          }
 
-
+          this[kFinished] = finished
 
-
-
-
-
+          callback(null, entries, finished)
+        } catch (err) {
+          callback(err)
+        }
+      })
+    }
 
-
-
+    return callback[kPromise]
+  }
+
+  _nextvSync (size, options) {
+    this[kFirst] = false
+    return binding.iterator_nextv(this[kContext], size)
   }
 
   _close (callback) {
-    this[kCache] =
-    this[kCallback] = null
+    this[kCache] = kEmpty
 
-
-
+    try {
+      binding.iterator_close(this[kContext])
+      process.nextTick(callback)
+    } catch (err) {
+      process.nextTick(callback, err)
+    }
   }
 
   _end (callback) {
package/package.json
CHANGED
Binary file
Binary file
package/util.h
CHANGED
@@ -263,23 +263,6 @@ static napi_status GetProperty(napi_env env,
   return GetValue(env, value, result);
 }
 
-// NOTE: napi_create_external_buffer is slow with finalizer...
-// template <typename T>
-// napi_status Convert(napi_env env, rocksdb::PinnableSlice* s, Encoding encoding, napi_value& result) {
-//   if (!s || !s->IsPinned()) {
-//     return napi_get_null(env, &result);
-//   } else if (encoding == Encoding::Buffer) {
-//     auto ptr = new rocksdb::PinnableSlice(std::move(*s));
-//     return napi_create_external_buffer(env, ptr->size(), const_cast<char*>(ptr->data()),
-//                                        Finalize<rocksdb::PinnableSlice>, ptr, &result);
-//     return napi_create_buffer_copy(env, s->size(), s->data(), nullptr, &result);
-//   } else if (encoding == Encoding::String) {
-//     return napi_create_string_utf8(env, s->data(), s->size(), &result);
-//   } else {
-//     return napi_invalid_arg;
-//   }
-// }
-
 template <typename T>
 napi_status Convert(napi_env env, T&& s, Encoding encoding, napi_value& result) {
   if (!s) {