@nxtedition/rocksdb 10.2.2 → 10.3.0
- package/binding.cc +20 -41
- package/index.js +23 -12
- package/package.json +1 -1
- package/prebuilds/darwin-arm64/@nxtedition+rocksdb.node +0 -0
- package/prebuilds/linux-x64/@nxtedition+rocksdb.node +0 -0
- package/util.js +7 -5
package/binding.cc
CHANGED
@@ -985,7 +985,7 @@ NAPI_METHOD(db_get_many) {
 
   struct State {
     std::vector<uint8_t> data;
-    std::vector<
+    std::vector<int32_t> sizes;
   };
 
   runAsync<State>(
@@ -1018,14 +1018,14 @@ NAPI_METHOD(db_get_many) {
 
       auto push = [&](rocksdb::Slice* slice){
         if (slice) {
-          state.sizes.push_back(slice->size());
+          state.sizes.push_back(static_cast<int32_t>(slice->size()));
          std::copy_n(slice->data(), slice->size(), std::back_inserter(state.data));
 
          if (state.data.size() & 0x7) {
            state.data.resize((state.data.size() | 0x7) + 1);
          }
        } else {
-          state.sizes.push_back(
+          state.sizes.push_back(-1);
        }
      };
 
@@ -1038,20 +1038,12 @@ NAPI_METHOD(db_get_many) {
     [=](auto& state, auto env, auto& argv) {
       argv.resize(3);
 
-
-
-
-
-
-
-
-        napi_value element;
-        if (maybeSize) {
-          NAPI_STATUS_RETURN(napi_create_uint32(env, *maybeSize, &element));
-        } else {
-          NAPI_STATUS_RETURN(napi_get_undefined(env, &element));
-        }
-        NAPI_STATUS_RETURN(napi_set_element(env, argv[1], idx, element));
+      if (state.sizes.size() > 0) {
+        auto sizes = std::make_unique<std::vector<int32_t>>(std::move(state.sizes));
+        NAPI_STATUS_RETURN(napi_create_external_buffer(env, sizes->size() * 4, sizes->data(), Finalize<std::vector<int32_t>>, sizes.get(), &argv[1]));
+        sizes.release();
+      } else {
+        NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[1]));
      }
 
      if (state.data.size() > 0) {
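Taken together, the db_get_many hunks above replace the old per-element N-API size array with a single external buffer of packed int32 lengths (4 bytes per key, -1 when a key is missing), while the value bytes stay in one data buffer padded to an 8-byte boundary after each value. A minimal decoding sketch of that layout, assuming the same sentinel and padding rules as the native side; the helper name is illustrative and not part of the package:

  // decodeGetMany: illustrative only; `sizes` and `data` are the buffers the
  // native db_get_many callback hands to JavaScript (either may be undefined).
  function decodeGetMany (sizes, data) {
    sizes ??= Buffer.alloc(0)
    data ??= Buffer.alloc(0)

    const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
    const values = []
    let offset = 0

    for (const size of sizes32) {
      if (size < 0) {
        values.push(undefined) // -1 marks a key that was not found
      } else {
        values.push(data.subarray(offset, offset + size))
        offset += size
        if (offset & 0x7) {
          offset = (offset | 0x7) + 1 // skip padding up to the next 8-byte boundary
        }
      }
    }
    return values
  }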
@@ -1304,7 +1296,7 @@ NAPI_METHOD(iterator_nextv) {
 
   struct State {
     std::vector<uint8_t> data;
-    std::vector<
+    std::vector<int32_t> sizes;
    bool finished = false;
  };
 
@@ -1322,7 +1314,7 @@ NAPI_METHOD(iterator_nextv) {
 
       auto push = [&](const std::optional<rocksdb::Slice>& slice){
         if (slice) {
-          state.sizes.push_back(slice->size());
+          state.sizes.push_back(static_cast<int32_t>(slice->size()));
          std::copy_n(slice->data(), slice->size(), std::back_inserter(state.data));
 
          if (state.data.size() & 0x7) {
@@ -1331,11 +1323,10 @@ NAPI_METHOD(iterator_nextv) {
 
           bytesRead += slice->size();
        } else {
-          state.sizes.push_back(
+          state.sizes.push_back(-1);
        }
      };
 
-      auto status = rocksdb::Status::OK();
      while (true) {
        if (!iterator->first_) {
          iterator->Next();
@@ -1344,9 +1335,8 @@ NAPI_METHOD(iterator_nextv) {
        }
 
        if (!iterator->Valid() || !iterator->Increment()) {
-          status = iterator->Status();
          state.finished = true;
-
+          return iterator->Status();
        }
 
        if (iterator->keys_ && iterator->values_) {
@@ -1361,31 +1351,20 @@ NAPI_METHOD(iterator_nextv) {
        }
 
        if (bytesRead > iterator->highWaterMarkBytes_ || state.sizes.size() / 2 >= count) {
-          status = rocksdb::Status::OK();
          state.finished = false;
-
+          return rocksdb::Status::OK();
        }
      }
-
-      return status;
    },
    [=](auto& state, auto env, auto& argv) {
      argv.resize(4);
 
-
-
-
-
-
-
-
-        napi_value element;
-        if (maybeSize) {
-          NAPI_STATUS_RETURN(napi_create_uint32(env, *maybeSize, &element));
-        } else {
-          NAPI_STATUS_RETURN(napi_get_undefined(env, &element));
-        }
-        NAPI_STATUS_RETURN(napi_set_element(env, argv[1], idx, element));
+      if (state.sizes.size() > 0) {
+        auto sizes = std::make_unique<std::vector<int32_t>>(std::move(state.sizes));
+        NAPI_STATUS_RETURN(napi_create_external_buffer(env, sizes->size() * 4, sizes->data(), Finalize<std::vector<int32_t>>, sizes.get(), &argv[1]));
+        sizes.release();
+      } else {
+        NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[1]));
      }
 
      if (state.data.size() > 0) {
package/index.js
CHANGED
@@ -41,6 +41,12 @@ class RocksLevel extends AbstractLevel {
     this[kPendingClose] = null
   }
 
+  static async create(...args) {
+    const db = new this(...args)
+    await db.open()
+    return db
+  }
+
   get sequence () {
     return binding.db_get_latest_sequence(this[kContext])
   }
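The new static create() helper is just construction plus open() in one await. A usage sketch, assuming the package's default export is the RocksLevel class and the usual abstract-level promise API; the location path is a placeholder:

  const RocksLevel = require('@nxtedition/rocksdb')

  async function main () {
    // create() is equivalent to `new RocksLevel(location)` followed by `await db.open()`.
    const db = await RocksLevel.create('/tmp/example-db')
    await db.put('hello', 'world')
    console.log(await db.get('hello'))
    await db.close()
  }

  main().catch(console.error)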
@@ -152,23 +158,28 @@ class RocksLevel extends AbstractLevel {
     const { valueEncoding } = options ?? EMPTY
     try {
       this[kRef]()
-      binding.db_get_many(this[kContext], keys, options ?? EMPTY, (err, sizes,
+      binding.db_get_many(this[kContext], keys, options ?? EMPTY, (err, sizes, data) => {
        if (err) {
          callback(err)
        } else {
-
-
+          data ??= Buffer.alloc(0)
+          sizes ??= Buffer.alloc(0)
+
+          const rows = []
          let offset = 0
-
-
-
+          const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
+          for (let n = 0; n < sizes32.length; n++) {
+            const size = sizes32[n]
+            const encoding = valueEncoding
+            if (size < 0) {
+              rows.push(undefined)
            } else {
-              if (!
-
-              } else if (
-
+              if (!encoding || encoding === 'buffer') {
+                rows.push(data.subarray(offset, offset + size))
+              } else if (encoding === 'slice') {
+                rows.push({ buffer: data, byteOffset: offset, byteLength: size })
              } else {
-
+                rows.push(data.toString(encoding, offset, offset + size))
              }
              offset += size
              if (offset & 0x7) {
@@ -176,7 +187,7 @@ class RocksLevel extends AbstractLevel {
               }
            }
          }
-          callback(null,
+          callback(null, rows)
        }
        this[kUnref]()
      })
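With the rewritten callback, each returned row's shape depends on valueEncoding: a negative size decodes to undefined for a missing key, 'buffer' (or no encoding) yields a subarray view into the shared data buffer, the package-specific 'slice' encoding yields a { buffer, byteOffset, byteLength } descriptor, and any other encoding goes through data.toString(encoding, ...). A hedged usage sketch, assuming this code backs abstract-level's getMany() and that db is an open instance; key names are placeholders:

  const texts = await db.getMany(['a', 'missing', 'b'], { valueEncoding: 'utf8' })
  // -> e.g. ['value-of-a', undefined, 'value-of-b']

  const slices = await db.getMany(['a'], { valueEncoding: 'slice' })
  // -> e.g. [{ buffer: <shared data Buffer>, byteOffset: 0, byteLength: <value length> }]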
package/package.json
CHANGED

-  "version": "10.2.2",
+  "version": "10.3.0",

package/prebuilds/darwin-arm64/@nxtedition+rocksdb.node
CHANGED

Binary file

package/prebuilds/linux-x64/@nxtedition+rocksdb.node
CHANGED

Binary file
package/util.js
CHANGED
@@ -1,19 +1,21 @@
 'use strict'
 
 function handleNextv (err, sizes, buffer, finished, options, callback) {
+  const { keyEncoding, valueEncoding } = options ?? {}
+
   if (err) {
     callback(err)
   } else {
     buffer ??= Buffer.alloc(0)
-
-    const { keyEncoding, valueEncoding } = options ?? {}
+    sizes ??= Buffer.alloc(0)
 
     const rows = []
     let offset = 0
-
-
+    const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
+    for (let n = 0; n < sizes32.length; n++) {
+      const size = sizes32[n]
      const encoding = n & 1 ? valueEncoding : keyEncoding
-      if (size
+      if (size < 0) {
        rows.push(undefined)
      } else {
        if (!encoding || encoding === 'buffer') {