@nxtedition/rocksdb 10.1.6 → 10.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/binding.cc +130 -80
- package/index.js +41 -23
- package/iterator.js +28 -16
- package/package.json +1 -1
- package/prebuilds/darwin-arm64/@nxtedition+rocksdb.node +0 -0
- package/prebuilds/linux-x64/@nxtedition+rocksdb.node +0 -0
- package/util.js +37 -0
package/binding.cc
CHANGED
@@ -435,16 +435,12 @@ struct Iterator final : public BaseIterator {
            const std::optional<std::string>& gt,
            const std::optional<std::string>& gte,
            const bool fillCache,
-           const Encoding keyEncoding,
-           const Encoding valueEncoding,
            const size_t highWaterMarkBytes,
            std::shared_ptr<const rocksdb::Snapshot> snapshot,
            bool tailing = false)
       : BaseIterator(database, column, reverse, lt, lte, gt, gte, limit, fillCache, snapshot, tailing),
         keys_(keys),
         values_(values),
-        keyEncoding_(keyEncoding),
-        valueEncoding_(valueEncoding),
         highWaterMarkBytes_(highWaterMarkBytes) {}

   void Seek(const rocksdb::Slice& target) override {
@@ -454,8 +450,6 @@ struct Iterator final : public BaseIterator {

   const bool keys_;
   const bool values_;
-  const Encoding keyEncoding_;
-  const Encoding valueEncoding_;
   const size_t highWaterMarkBytes_;
   bool first_ = true;
 };
@@ -957,8 +951,8 @@ NAPI_METHOD(db_get_many) {
   Database* database;
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));

-  uint32_t
-  NAPI_STATUS_THROWS(napi_get_array_length(env, argv[1], &
+  uint32_t count;
+  NAPI_STATUS_THROWS(napi_get_array_length(env, argv[1], &count));

   const auto options = argv[2];

@@ -968,29 +962,30 @@ NAPI_METHOD(db_get_many) {
   bool ignoreRangeDeletions = false;
   NAPI_STATUS_THROWS(GetProperty(env, options, "ignoreRangeDeletions", ignoreRangeDeletions));

-  Encoding valueEncoding = Encoding::String;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "valueEncoding", valueEncoding));
-
   rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
   NAPI_STATUS_THROWS(GetProperty(env, options, "column", column));

+  bool takeSnapshot = true;
+  NAPI_STATUS_THROWS(GetProperty(env, options, "snapshot", takeSnapshot));
+
   auto callback = argv[3];

-
-
+  std::shared_ptr<const rocksdb::Snapshot> snapshot;
+  if (takeSnapshot) {
+    snapshot.reset(database->db->GetSnapshot(), [=](const auto ptr) { database->db->ReleaseSnapshot(ptr); });
+  }

-  std::vector<rocksdb::PinnableSlice> keys{
+  std::vector<rocksdb::PinnableSlice> keys{count};

-  for (uint32_t n = 0; n <
+  for (uint32_t n = 0; n < count; n++) {
     napi_value element;
     NAPI_STATUS_THROWS(napi_get_element(env, argv[1], n, &element));
     NAPI_STATUS_THROWS(GetValue(env, element, keys[n]));
   }

   struct State {
-    std::vector<
-    std::vector<
-    std::vector<rocksdb::Slice> keys;
+    std::vector<uint8_t> data;
+    std::vector<std::optional<size_t>> sizes;
   };

   runAsync<State>(
@@ -1003,41 +998,69 @@ NAPI_METHOD(db_get_many) {
       readOptions.ignore_range_deletions = ignoreRangeDeletions;
       readOptions.optimize_multiget_for_io = true;

-
-
-
+      std::vector<rocksdb::Status> statuses{count};
+      std::vector<rocksdb::PinnableSlice> values{count};
+      std::vector<rocksdb::Slice> keys2{count};
+
+      for (auto n = 0; n < count; n++) {
+        keys2[n] = keys[n];
+      }
+
+      database->db->MultiGet(readOptions, column, count, keys2.data(), values.data(), statuses.data());

-
-
+      auto size = 0;
+      for (auto n = 0; n < count; n++) {
+        const auto valueSize = values[n].size();
+        size += valueSize & 0x7 ? (valueSize | 0x7) + 1 : valueSize;
       }

-
-
+      state.data.reserve(size);
+
+      auto push = [&](rocksdb::Slice* slice){
+        if (slice) {
+          state.sizes.push_back(slice->size());
+          std::copy_n(slice->data(), slice->size(), std::back_inserter(state.data));
+
+          if (state.data.size() & 0x7) {
+            state.data.resize((state.data.size() | 0x7) + 1);
+          }
+        } else {
+          state.sizes.push_back(std::nullopt);
+        }
+      };
+
+      for (auto n = 0; n < count; n++) {
+        push(statuses[n].ok() ? &values[n] : nullptr);
+      }

       return rocksdb::Status::OK();
     },
     [=](auto& state, auto env, auto& argv) {
-      argv.resize(
+      argv.resize(3);
+
+      const auto count = state.sizes.size();

-      NAPI_STATUS_RETURN(napi_create_array_with_length(env,
+      NAPI_STATUS_RETURN(napi_create_array_with_length(env, count, &argv[1]));

-      for (uint32_t idx = 0; idx <
-        const auto&
-        const auto& value = state.values[idx];
+      for (uint32_t idx = 0; idx < count; idx++) {
+        const auto& maybeSize = state.sizes[idx];

         napi_value element;
-        if (
-          NAPI_STATUS_RETURN(
+        if (maybeSize) {
+          NAPI_STATUS_RETURN(napi_create_uint32(env, *maybeSize, &element));
         } else {
-
-          NAPI_STATUS_RETURN(Convert(env, &value, valueEncoding, element));
+          NAPI_STATUS_RETURN(napi_get_undefined(env, &element));
         }
         NAPI_STATUS_RETURN(napi_set_element(env, argv[1], idx, element));
       }

-      state.
-
-
+      if (state.data.size() > 0) {
+        auto data = std::make_unique<std::vector<uint8_t>>(std::move(state.data));
+        NAPI_STATUS_RETURN(napi_create_external_buffer(env, data->size(), data->data(), Finalize<std::vector<uint8_t>>, data.get(), &argv[2]));
+        data.release();
+      } else {
+        NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[2]));
+      }

       return napi_ok;
     });
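Note on the new callback shape above: instead of converting each value through the removed Convert(..., valueEncoding, ...) path, db_get_many now reports a per-key size array (missing keys become undefined) plus a single external buffer into which all found values are copied back to back, each entry padded so the next one starts on an 8-byte boundary (the `(x | 0x7) + 1` expression rounds a non-multiple of 8 up to the next multiple of 8). A minimal JavaScript sketch of walking such a pair, mirroring the decoding added in index.js further down; the helper name is illustrative, not part of the package:

  // Illustrative only: walk a packed (sizes, buffer) pair as produced by db_get_many.
  function unpackValues (sizes, buffer) {
    const values = []
    let offset = 0
    for (const size of sizes) {
      if (size == null) {
        values.push(undefined) // key was not found
        continue
      }
      values.push(buffer.subarray(offset, offset + size))
      offset += size
      if (offset & 0x7) {
        offset = (offset | 0x7) + 1 // skip padding up to the next 8-byte boundary
      }
    }
    return values
  }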
@@ -1203,12 +1226,6 @@ NAPI_METHOD(iterator_init) {
   bool fillCache = false;
   NAPI_STATUS_THROWS(GetProperty(env, options, "fillCache", fillCache));

-  Encoding keyEncoding = Encoding::String;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "keyEncoding", keyEncoding));
-
-  Encoding valueEncoding = Encoding::String;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "valueEncoding", valueEncoding));
-
   int32_t limit = -1;
   NAPI_STATUS_THROWS(GetProperty(env, options, "limit", limit));

@@ -1230,11 +1247,16 @@ NAPI_METHOD(iterator_init) {
   rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
   NAPI_STATUS_THROWS(GetProperty(env, options, "column", column));

-
-
+  bool takeSnapshot = !tailing;
+  NAPI_STATUS_THROWS(GetProperty(env, options, "snapshot", takeSnapshot));
+
+  std::shared_ptr<const rocksdb::Snapshot> snapshot;
+  if (takeSnapshot) {
+    snapshot.reset(database->db->GetSnapshot(), [=](const auto ptr) { database->db->ReleaseSnapshot(ptr); });
+  }

   auto iterator = std::unique_ptr<Iterator>(new Iterator(database, column, reverse, keys, values, limit, lt, lte, gt,
-                                                         gte, fillCache,
+                                                         gte, fillCache, highWaterMarkBytes,
                                                          snapshot, tailing));

   napi_value result;
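Both db_get_many and iterator_init now read an optional boolean `snapshot` property from the options object (defaulting to true for gets and to !tailing for iterators); when enabled, a RocksDB snapshot is taken and held through a shared_ptr whose deleter calls ReleaseSnapshot. Assuming the JavaScript wrappers forward their options objects unchanged to the binding (as the index.js and iterator.js code below does), opting out would look roughly like this sketch; it has not been verified against abstract-level's option handling:

  const it = db.iterator({ snapshot: false })                 // read live state, no pinned snapshot
  const values = await db.getMany(keys, { snapshot: false })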
@@ -1275,13 +1297,14 @@ NAPI_METHOD(iterator_nextv) {
   Iterator* iterator;
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&iterator)));

-  uint32_t
-  NAPI_STATUS_THROWS(napi_get_value_uint32(env, argv[1], &
+  uint32_t count;
+  NAPI_STATUS_THROWS(napi_get_value_uint32(env, argv[1], &count));

   auto callback = argv[2];

   struct State {
-    std::vector<
+    std::vector<uint8_t> data;
+    std::vector<std::optional<size_t>> sizes;
     bool finished = false;
   };

@@ -1292,61 +1315,88 @@ NAPI_METHOD(iterator_nextv) {
        iterator->SeekToRange();
      }

-      state.
-
+      state.sizes.reserve(count * 2);
+      state.data.reserve(iterator->highWaterMarkBytes_);
+
+      auto bytesRead = 0;
+
+      auto push = [&](const std::optional<rocksdb::Slice>& slice){
+        if (slice) {
+          state.sizes.push_back(slice->size());
+          std::copy_n(slice->data(), slice->size(), std::back_inserter(state.data));
+
+          if (state.data.size() & 0x7) {
+            state.data.resize((state.data.size() | 0x7) + 1);
+          }

+          bytesRead += slice->size();
+        } else {
+          state.sizes.push_back(std::nullopt);
+        }
+      };
+
+      auto status = rocksdb::Status::OK();
      while (true) {
-        if (!iterator->first_)
+        if (!iterator->first_) {
          iterator->Next();
-        else
+        } else {
          iterator->first_ = false;
+        }

-        if (!iterator->Valid() || !iterator->Increment())
+        if (!iterator->Valid() || !iterator->Increment()) {
+          status = iterator->Status();
+          state.finished = true;
          break;
-
-        auto k = rocksdb::PinnableSlice();
-        auto v = rocksdb::PinnableSlice();
+        }

        if (iterator->keys_ && iterator->values_) {
-
-
+          push(iterator->CurrentKey());
+          push(iterator->CurrentValue());
        } else if (iterator->keys_) {
-
+          push(iterator->CurrentKey());
+          push(std::nullopt);
        } else if (iterator->values_) {
-
+          push(std::nullopt);
+          push(iterator->CurrentValue());
        }

-        bytesRead
-
-        state.cache.push_back(std::move(v));
-
-        if (bytesRead > iterator->highWaterMarkBytes_ || state.cache.size() / 2 >= size) {
+        if (bytesRead > iterator->highWaterMarkBytes_ || state.sizes.size() / 2 >= count) {
+          status = rocksdb::Status::OK();
          state.finished = false;
-
+          break;
        }
      }

-
-
-      return iterator->Status();
+      return status;
    },
    [=](auto& state, auto env, auto& argv) {
-      argv.resize(
+      argv.resize(4);

-
+      const auto count = state.sizes.size();

-
-      napi_value key;
-      napi_value val;
+      NAPI_STATUS_RETURN(napi_create_array_with_length(env, count, &argv[1]));

-
-
+      for (uint32_t idx = 0; idx < count; idx++) {
+        const auto& maybeSize = state.sizes[idx];

-
-
+        napi_value element;
+        if (maybeSize) {
+          NAPI_STATUS_RETURN(napi_create_uint32(env, *maybeSize, &element));
+        } else {
+          NAPI_STATUS_RETURN(napi_get_undefined(env, &element));
+        }
+        NAPI_STATUS_RETURN(napi_set_element(env, argv[1], idx, element));
+      }
+
+      if (state.data.size() > 0) {
+        auto data = std::make_unique<std::vector<uint8_t>>(std::move(state.data));
+        NAPI_STATUS_RETURN(napi_create_external_buffer(env, data->size(), data->data(), Finalize<std::vector<uint8_t>>, data.get(), &argv[2]));
+        data.release();
+      } else {
+        NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[2]));
      }

-      NAPI_STATUS_RETURN(napi_get_boolean(env, state.finished, &argv[
+      NAPI_STATUS_RETURN(napi_get_boolean(env, state.finished, &argv[3]));

      return napi_ok;
    });
package/index.js
CHANGED
@@ -8,6 +8,7 @@ const { ChainedBatch } = require('./chained-batch')
 const { Iterator } = require('./iterator')
 const fs = require('node:fs')
 const assert = require('node:assert')
+const { handleNextv } = require('./util')

 const kContext = Symbol('context')
 const kColumns = Symbol('columns')
@@ -148,16 +149,35 @@ class RocksLevel extends AbstractLevel {
   _getMany (keys, options, callback) {
     callback = fromCallback(callback, kPromise)

-    const {
-
-    if (keyEncoding !== 'buffer') {
-      keys = keys.map(key => typeof key === 'string' ? Buffer.from(key) : key)
-    }
-
+    const { valueEncoding } = options ?? EMPTY
     try {
       this[kRef]()
-      binding.db_get_many(this[kContext], keys, options ?? EMPTY, (err,
-
+      binding.db_get_many(this[kContext], keys, options ?? EMPTY, (err, sizes, buffer) => {
+        if (err) {
+          callback(err)
+        } else {
+          buffer ??= Buffer.alloc(0)
+          const val = []
+          let offset = 0
+          for (const size of sizes) {
+            if (size == null) {
+              val.push(undefined)
+            } else {
+              if (!valueEncoding || valueEncoding === 'buffer') {
+                val.push(buffer.subarray(offset, offset + size))
+              } else if (valueEncoding === 'slice') {
+                val.push({ buffer, byteOffset: offset, byteLength: size })
+              } else {
+                val.push(buffer.toString(valueEncoding, offset, offset + size))
+              }
+              offset += size
+              if (offset & 0x7) {
+                offset = (offset | 0x7) + 1
+              }
+            }
+          }
+          callback(null, val)
+        }
         this[kUnref]()
       })
     } catch (err) {
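The _getMany decoding above supports three value encodings: the default 'buffer' (a zero-copy Buffer view into the packed buffer), the package-specific 'slice' (a plain descriptor of where the value sits in the shared buffer), and any Node.js string encoding passed to buffer.toString. A compact restatement of those branches; the helper name decodeValue is illustrative, and whether a given encoding reaches this code unchanged depends on how abstract-level resolves the options:

  function decodeValue (buffer, offset, size, valueEncoding) {
    if (!valueEncoding || valueEncoding === 'buffer') {
      return buffer.subarray(offset, offset + size)             // zero-copy view
    } else if (valueEncoding === 'slice') {
      return { buffer, byteOffset: offset, byteLength: size }   // raw slice descriptor
    }
    return buffer.toString(valueEncoding, offset, offset + size) // e.g. 'utf8'
  }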
@@ -217,14 +237,10 @@ class RocksLevel extends AbstractLevel {
     return new ChainedBatch(this, this[kContext], (batch, context, options, callback) => {
       try {
         this[kRef]()
-
+        binding.batch_write(this[kContext], context, options, (err) => {
           this[kUnref]()
           callback(err)
         })
-        if (sync) {
-          this[kUnref]()
-          process.nextTick(callback, null)
-        }
       } catch (err) {
         process.nextTick(callback, err)
       }
@@ -244,7 +260,7 @@ class RocksLevel extends AbstractLevel {
         assert(false)
       }
     }
-    batch.
+    batch._write(options, callback)

     return callback[kPromise]
   }
@@ -282,15 +298,17 @@ class RocksLevel extends AbstractLevel {
       const context = binding.iterator_init(this[kContext], options ?? {})
       try {
         this[kRef]()
-        return await new Promise((resolve, reject) => binding.iterator_nextv(context, options.limit, (err,
-
-
-
-
-
-
-
-
+        return await new Promise((resolve, reject) => binding.iterator_nextv(context, options.limit, (err, sizes, buffer, finished) => {
+          handleNextv(err, sizes, buffer, finished, options, (err, rows, finished) => {
+            if (err) {
+              reject(err)
+            } else {
+              resolve({
+                rows,
+                finished
+              })
+            }
+          })
         }))
       } finally {
         binding.iterator_close(context)
package/iterator.js
CHANGED
@@ -2,6 +2,7 @@

 const { fromCallback } = require('catering')
 const { AbstractIterator } = require('abstract-level')
+const { handleNextv } = require('./util')

 const binding = require('./binding')

@@ -14,6 +15,7 @@ const kPosition = Symbol('position')
 const kHandleNext = Symbol('handleNext')
 const kHandleNextv = Symbol('handleNextv')
 const kCallback = Symbol('callback')
+const kOptions = Symbol('options')
 const empty = []

 const registry = new FinalizationRegistry((context) => {
@@ -27,6 +29,7 @@ class Iterator extends AbstractIterator {
     this[kContext] = binding.iterator_init(context, options)
     registry.register(this, this[kContext], this[kContext])

+    this[kOptions] = { ...options }
     this[kHandleNext] = this[kHandleNext].bind(this)
     this[kHandleNextv] = this[kHandleNextv].bind(this)
     this[kCallback] = null
@@ -73,15 +76,19 @@ class Iterator extends AbstractIterator {
     return this
   }

-  [kHandleNext] (err,
-
-
+  [kHandleNext] (err, sizes, buffer, finished) {
+    handleNextv(err, sizes, buffer, finished, this[kOptions], (err, items, finished) => {
+      const callback = this[kCallback]
+      if (err) {
+        return callback(err)
+      }

-
-
-
+      this[kCache] = items
+      this[kFinished] = finished
+      this[kPosition] = 0

-
+      this._next(callback)
+    })
   }

   _nextv (size, options, callback) {
@@ -98,17 +105,22 @@ class Iterator extends AbstractIterator {
     return callback[kPromise]
   }

-  [kHandleNextv] (err,
-
-
-
+  [kHandleNextv] (err, sizes, buffer, finished) {
+    handleNextv(err, sizes, buffer, finished, this[kOptions], (err, items, finished) => {
+      const callback = this[kCallback]
+      if (err) {
+        return callback(err)
+      }
+
+      this[kFinished] = finished

-
-
-
-
+      const entries = []
+      for (let n = 0; n < items.length; n += 2) {
+        entries.push([items[n + 0], items[n + 1]])
+      }

-
+      callback(null, entries)
+    })
   }

   _close (callback) {
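The flat items array produced by handleNextv interleaves keys and values (key at even indices, value at odd), which is why [kHandleNextv] above re-pairs them two at a time. With already-decoded string items as a hypothetical input:

  // Hypothetical decoded items: [key0, value0, key1, value1]
  const items = ['k1', 'v1', 'k2', 'v2']
  const entries = []
  for (let n = 0; n < items.length; n += 2) {
    entries.push([items[n], items[n + 1]])
  }
  // entries => [['k1', 'v1'], ['k2', 'v2']]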
package/package.json
CHANGED
Binary file
package/util.js
ADDED
@@ -0,0 +1,37 @@
+'use strict'
+
+function handleNextv (err, sizes, buffer, finished, options, callback) {
+  if (err) {
+    callback(err)
+  } else {
+    buffer ??= Buffer.alloc(0)
+
+    const { keyEncoding, valueEncoding } = options ?? {}
+
+    const rows = []
+    let offset = 0
+    for (let n = 0; n < sizes.length; n++) {
+      const size = sizes[n]
+      const encoding = n & 1 ? valueEncoding : keyEncoding
+      if (size == null) {
+        rows.push(undefined)
+      } else {
+        if (!encoding || encoding === 'buffer') {
+          rows.push(buffer.subarray(offset, offset + size))
+        } else if (encoding === 'slice') {
+          rows.push({ buffer, byteOffset: offset, byteLength: size })
+        } else {
+          rows.push(buffer.toString(encoding, offset, offset + size))
+        }
+        offset += size
+        if (offset & 0x7) {
+          offset = (offset | 0x7) + 1
+        }
+      }
+    }
+
+    callback(null, rows, finished)
+  }
+}
+
+exports.handleNextv = handleNextv
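A small usage sketch of handleNextv with hand-built inputs (hypothetical data, padded to an 8-byte boundary exactly as the native side does; even-indexed sizes are decoded with keyEncoding, odd-indexed ones with valueEncoding):

  const { handleNextv } = require('./util')

  // One key/value pair: key 'foo' (3 bytes + 5 bytes of padding), then value 'barbaz'.
  const buffer = Buffer.concat([Buffer.from('foo'), Buffer.alloc(5), Buffer.from('barbaz')])
  const sizes = [3, 6]

  handleNextv(null, sizes, buffer, true, { keyEncoding: 'utf8', valueEncoding: 'utf8' }, (err, rows, finished) => {
    // rows => ['foo', 'barbaz'], finished => true
  })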