@nxtedition/rocksdb 10.3.4 → 10.3.6
This diff shows the published contents of the two package versions as they appear in their respective public registries; it is provided for informational purposes only.
- package/binding.cc +117 -3
- package/chained-batch.js +10 -4
- package/index.js +12 -26
- package/package.json +1 -1
- package/prebuilds/darwin-arm64/@nxtedition+rocksdb.node +0 -0
- package/prebuilds/linux-x64/@nxtedition+rocksdb.node +0 -0
- package/util.js +32 -0
package/binding.cc
CHANGED
@@ -1060,6 +1060,97 @@ NAPI_METHOD(db_get_many) {
   return 0;
 }
 
+NAPI_METHOD(db_get_many_sync) {
+  NAPI_ARGV(3);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  uint32_t count;
+  NAPI_STATUS_THROWS(napi_get_array_length(env, argv[1], &count));
+
+  bool fillCache = true;
+  NAPI_STATUS_THROWS(GetProperty(env, argv[2], "fillCache", fillCache));
+
+  bool ignoreRangeDeletions = false;
+  NAPI_STATUS_THROWS(GetProperty(env, argv[2], "ignoreRangeDeletions", ignoreRangeDeletions));
+
+  rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
+  NAPI_STATUS_THROWS(GetProperty(env, argv[2], "column", column));
+
+  std::vector<rocksdb::Slice> keys{count};
+  std::vector<rocksdb::Status> statuses{count};
+  std::vector<rocksdb::PinnableSlice> values{count};
+
+  for (uint32_t n = 0; n < count; n++) {
+    napi_value element;
+    NAPI_STATUS_THROWS(napi_get_element(env, argv[1], n, &element));
+    NAPI_STATUS_THROWS(GetValue(env, element, keys[n]));
+  }
+
+  struct State {
+    std::vector<uint8_t> data;
+    std::vector<int32_t> sizes;
+  } state;
+
+  rocksdb::ReadOptions readOptions;
+  readOptions.fill_cache = fillCache;
+  readOptions.async_io = true;
+  readOptions.ignore_range_deletions = ignoreRangeDeletions;
+  readOptions.optimize_multiget_for_io = true;
+
+  database->db->MultiGet(readOptions, column, count, keys.data(), values.data(), statuses.data());
+
+  auto size = 0;
+  for (auto n = 0; n < count; n++) {
+    const auto valueSize = values[n].size();
+    size += valueSize & 0x7 ? (valueSize | 0x7) + 1 : valueSize;
+  }
+
+  state.data.reserve(size);
+
+  auto push = [&](rocksdb::Slice* slice){
+    if (slice) {
+      state.sizes.push_back(static_cast<int32_t>(slice->size()));
+      std::copy_n(slice->data(), slice->size(), std::back_inserter(state.data));
+
+      if (state.data.size() & 0x7) {
+        state.data.resize((state.data.size() | 0x7) + 1);
+      }
+    } else {
+      state.sizes.push_back(-1);
+    }
+  };
+
+  for (auto n = 0; n < count; n++) {
+    push(statuses[n].ok() ? &values[n] : nullptr);
+  }
+
+  napi_value sizes;
+  if (state.sizes.size() > 0) {
+    auto sizes_ptr = std::make_unique<std::vector<int32_t>>(std::move(state.sizes));
+    NAPI_STATUS_THROWS(napi_create_external_buffer(env, sizes_ptr->size() * 4, sizes_ptr->data(), Finalize<std::vector<int32_t>>, sizes_ptr.get(), &sizes));
+    sizes_ptr.release();
+  } else {
+    NAPI_STATUS_THROWS(napi_get_undefined(env, &sizes));
+  }
+
+  napi_value data;
+  if (state.data.size() > 0) {
+    auto data_ptr = std::make_unique<std::vector<uint8_t>>(std::move(state.data));
+    NAPI_STATUS_THROWS(napi_create_external_buffer(env, data_ptr->size(), data_ptr->data(), Finalize<std::vector<uint8_t>>, data_ptr.get(), &data));
+    data_ptr.release();
+  } else {
+    NAPI_STATUS_THROWS(napi_get_undefined(env, &data));
+  }
+
+  napi_value result;
+  NAPI_STATUS_THROWS(napi_create_array_with_length(env, 2, &result));
+  NAPI_STATUS_THROWS(napi_set_element(env, result, 0, sizes));
+  NAPI_STATUS_THROWS(napi_set_element(env, result, 1, data));
+  return result;
+}
+
 NAPI_METHOD(db_clear) {
   NAPI_ARGV(2);
 
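Note: db_get_many_sync returns a [sizes, data] pair instead of one JS value per key. Values are concatenated into a single external buffer, each entry padded to the next 8-byte boundary, and sizes is an Int32 buffer in which -1 marks a missing key. A minimal JavaScript sketch of decoding that contract (it mirrors handleMany in util.js below):

    const rows = []
    const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
    let offset = 0
    for (const size of sizes32) {
      if (size < 0) { rows.push(undefined); continue } // key was not found
      rows.push(data.subarray(offset, offset + size))  // one decoded value
      offset += size
      if (offset & 0x7) offset = (offset | 0x7) + 1    // skip padding to the next multiple of 8
    }
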
@@ -1481,8 +1572,6 @@ NAPI_METHOD(batch_clear) {
 NAPI_METHOD(batch_write) {
   NAPI_ARGV(4);
 
-  napi_value result = 0;
-
   Database* database;
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
 
@@ -1508,7 +1597,30 @@ NAPI_METHOD(batch_write) {
     },
     [=](int64_t& seq, auto env, auto& argv) { return napi_ok; });
 
-  return
+  return 0;
+}
+
+NAPI_METHOD(batch_write_sync) {
+  NAPI_ARGV(3);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  rocksdb::WriteBatch* batch;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));
+
+  auto options = argv[2];
+
+  bool sync = false;
+  NAPI_STATUS_THROWS(GetProperty(env, options, "sync", sync));
+
+  bool lowPriority = false;
+  NAPI_STATUS_THROWS(GetProperty(env, options, "lowPriority", lowPriority));
+
+  rocksdb::WriteOptions writeOptions;
+  writeOptions.sync = sync;
+  writeOptions.low_pri = lowPriority;
+  ROCKS_STATUS_THROWS_NAPI(database->db->Write(writeOptions, batch));
 }
 
 NAPI_METHOD(batch_count) {
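The options object read above maps two optional JS fields onto rocksdb::WriteOptions; both default to false. A sketch of the accepted shape (field semantics per RocksDB's WriteOptions documentation):

    const writeOptions = {
      sync: true,        // rocksdb::WriteOptions::sync: sync the WAL to disk before the write returns
      lowPriority: false // rocksdb::WriteOptions::low_pri: the write may be throttled behind pending compaction
    }
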
@@ -1568,6 +1680,7 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(db_get_location);
   NAPI_EXPORT_FUNCTION(db_close);
   NAPI_EXPORT_FUNCTION(db_get_many);
+  NAPI_EXPORT_FUNCTION(db_get_many_sync);
   NAPI_EXPORT_FUNCTION(db_clear);
   NAPI_EXPORT_FUNCTION(db_get_property);
   NAPI_EXPORT_FUNCTION(db_get_latest_sequence);
@@ -1583,6 +1696,7 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(batch_del);
   NAPI_EXPORT_FUNCTION(batch_clear);
   NAPI_EXPORT_FUNCTION(batch_write);
+  NAPI_EXPORT_FUNCTION(batch_write_sync);
   NAPI_EXPORT_FUNCTION(batch_merge);
   NAPI_EXPORT_FUNCTION(batch_count);
   NAPI_EXPORT_FUNCTION(batch_iterate);
package/chained-batch.js
CHANGED
@@ -59,14 +59,20 @@ class ChainedBatch extends AbstractChainedBatch {
   _write (options, callback) {
     callback = fromCallback(callback, kPromise)
 
-    …
-    …
-      callback
-    })
+    try {
+      binding.batch_write_sync(this[kDbContext], this[kBatchContext], options)
+      process.nextTick(callback)
+    } catch (err) {
+      process.nextTick(callback, err)
+    }
 
     return callback[kPromise]
   }
 
+  _writeSync (options, callback) {
+    binding.batch_write_sync(this[kDbContext], this[kBatchContext], options)
+  }
+
   _close (callback) {
     process.nextTick(callback)
   }
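From the caller's side the chained-batch flow is unchanged: the write itself now runs synchronously through batch_write_sync, with completion still reported asynchronously on the next tick. A hypothetical usage sketch (db is an opened RocksLevel instance; standard abstract-level API):

    const batch = db.batch()
    batch.put('key', 'value')
    await batch.write() // resolves on the next tick after the synchronous native write
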
package/index.js
CHANGED
@@ -8,7 +8,7 @@ const { ChainedBatch } = require('./chained-batch')
 const { Iterator } = require('./iterator')
 const fs = require('node:fs')
 const assert = require('node:assert')
-const { handleNextv } = require('./util')
+const { handleNextv, handleMany } = require('./util')
 
 const kContext = Symbol('context')
 const kColumns = Symbol('columns')
@@ -152,40 +152,26 @@ class RocksLevel extends AbstractLevel {
     return callback[kPromise]
   }
 
+  _getManySync (keys, options) {
+    const ret = binding.db_get_many_sync(this[kContext], keys, options ?? EMPTY)
+    const [sizes, data] = ret
+    return handleMany(sizes, data, options ?? EMPTY)
+  }
+
   _getMany (keys, options, callback) {
     callback = fromCallback(callback, kPromise)
 
-    const { valueEncoding } = options ?? EMPTY
     try {
       this[kRef]()
       binding.db_get_many(this[kContext], keys, options ?? EMPTY, (err, sizes, data) => {
         if (err) {
           callback(err)
         } else {
-          …
-          …
-          …
-          …
-          …
-          const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
-          for (let n = 0; n < sizes32.length; n++) {
-            const size = sizes32[n]
-            const encoding = valueEncoding
-            if (size < 0) {
-              rows.push(undefined)
-            } else {
-              if (!encoding || encoding === 'buffer') {
-                rows.push(data.subarray(offset, offset + size))
-              } else if (encoding === 'slice') {
-                rows.push({ buffer: data, byteOffset: offset, byteLength: size })
-              } else {
-                rows.push(data.toString(encoding, offset, offset + size))
-              }
-              offset += size
-              if (offset & 0x7) {
-                offset = (offset | 0x7) + 1
-              }
-            }
-          }
+          let rows
+          try {
+            rows = handleMany(sizes, data, options ?? EMPTY)
+          } catch (err) {
+            callback(err)
           }
           callback(null, rows)
         }
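A hypothetical call against the new internal method (any public wrapper is outside this diff; encodings follow the usual abstract-level conventions):

    const rows = db._getManySync(['a', 'b', 'missing'], { valueEncoding: 'utf8' })
    // e.g. ['valueA', 'valueB', undefined]; absent keys come back as undefined
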
package/package.json
CHANGED

package/prebuilds/darwin-arm64/@nxtedition+rocksdb.node
CHANGED
Binary file

package/prebuilds/linux-x64/@nxtedition+rocksdb.node
CHANGED
Binary file
package/util.js
CHANGED
@@ -1,5 +1,36 @@
 'use strict'
 
+function handleMany (sizes, data, options) {
+  const { valueEncoding } = options ?? {}
+
+  data ??= Buffer.alloc(0)
+  sizes ??= Buffer.alloc(0)
+
+  const rows = []
+  let offset = 0
+  const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
+  for (let n = 0; n < sizes32.length; n++) {
+    const size = sizes32[n]
+    const encoding = valueEncoding
+    if (size < 0) {
+      rows.push(undefined)
+    } else {
+      if (!encoding || encoding === 'buffer') {
+        rows.push(data.subarray(offset, offset + size))
+      } else if (encoding === 'slice') {
+        rows.push({ buffer: data, byteOffset: offset, byteLength: size })
+      } else {
+        rows.push(data.toString(encoding, offset, offset + size))
+      }
+      offset += size
+      if (offset & 0x7) {
+        offset = (offset | 0x7) + 1
+      }
+    }
+  }
+
+  return rows
+}
 function handleNextv (err, sizes, buffer, finished, options, callback) {
   const { keyEncoding, valueEncoding } = options ?? {}
 
@@ -36,4 +67,5 @@ function handleNextv (err, sizes, buffer, finished, options, callback) {
   }
 }
 
+exports.handleMany = handleMany
 exports.handleNextv = handleNextv