@nxtedition/rocksdb 10.3.3 → 10.3.5

package/binding.cc CHANGED
@@ -965,9 +965,6 @@ NAPI_METHOD(db_get_many) {
   rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
   NAPI_STATUS_THROWS(GetProperty(env, options, "column", column));
 
-  Encoding valueEncoding = Encoding::Buffer;
-  NAPI_STATUS_THROWS(GetProperty(env, options, "valueEncoding", valueEncoding));
-
   bool takeSnapshot = true;
   NAPI_STATUS_THROWS(GetProperty(env, options, "snapshot", takeSnapshot));
 
@@ -1041,43 +1038,20 @@ NAPI_METHOD(db_get_many) {
       [=](auto& state, auto env, auto& argv) {
         argv.resize(3);
 
-        if (valueEncoding == Encoding::String) {
-          napi_value arr;
-          NAPI_STATUS_RETURN(napi_create_array_with_length(env, state.sizes.size(), &argv[1]));
-
-          const auto ptr = reinterpret_cast<char*>(state.data.data());
-          auto offset = 0;
-          for (auto n = 0; n < state.sizes.size(); n++) {
-            napi_value str;
-            const auto size = state.sizes[n];
-            if (size >= 0) {
-              NAPI_STATUS_RETURN(napi_create_string_utf8(env, ptr + offset, static_cast<size_t>(size), &str));
-              offset += size;
-              if (offset & 0x7) {
-                offset = (offset | 0x7) + 1;
-              }
-            } else {
-              NAPI_STATUS_RETURN(napi_get_undefined(env, &str));
-            }
-            NAPI_STATUS_RETURN(napi_set_element(env, argv[1], n, str));
-          }
-          NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[2]));
+        if (state.sizes.size() > 0) {
+          auto sizes = std::make_unique<std::vector<int32_t>>(std::move(state.sizes));
+          NAPI_STATUS_RETURN(napi_create_external_buffer(env, sizes->size() * 4, sizes->data(), Finalize<std::vector<int32_t>>, sizes.get(), &argv[1]));
+          sizes.release();
         } else {
-          if (state.sizes.size() > 0) {
-            auto sizes = std::make_unique<std::vector<int32_t>>(std::move(state.sizes));
-            NAPI_STATUS_RETURN(napi_create_external_buffer(env, sizes->size() * 4, sizes->data(), Finalize<std::vector<int32_t>>, sizes.get(), &argv[1]));
-            sizes.release();
-          } else {
-            NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[1]));
-          }
+          NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[1]));
+        }
 
-          if (state.data.size() > 0) {
-            auto data = std::make_unique<std::vector<uint8_t>>(std::move(state.data));
-            NAPI_STATUS_RETURN(napi_create_external_buffer(env, data->size(), data->data(), Finalize<std::vector<uint8_t>>, data.get(), &argv[2]));
-            data.release();
-          } else {
-            NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[2]));
-          };
+        if (state.data.size() > 0) {
+          auto data = std::make_unique<std::vector<uint8_t>>(std::move(state.data));
+          NAPI_STATUS_RETURN(napi_create_external_buffer(env, data->size(), data->data(), Finalize<std::vector<uint8_t>>, data.get(), &argv[2]));
+          data.release();
+        } else {
+          NAPI_STATUS_RETURN(napi_get_undefined(env, &argv[2]));
         }
 
         return napi_ok;
@@ -1086,6 +1060,97 @@ NAPI_METHOD(db_get_many) {
   return 0;
 }
 
+NAPI_METHOD(db_get_many_sync) {
+  NAPI_ARGV(3);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  uint32_t count;
+  NAPI_STATUS_THROWS(napi_get_array_length(env, argv[1], &count));
+
+  bool fillCache = true;
+  NAPI_STATUS_THROWS(GetProperty(env, argv[2], "fillCache", fillCache));
+
+  bool ignoreRangeDeletions = false;
+  NAPI_STATUS_THROWS(GetProperty(env, argv[2], "ignoreRangeDeletions", ignoreRangeDeletions));
+
+  rocksdb::ColumnFamilyHandle* column = database->db->DefaultColumnFamily();
+  NAPI_STATUS_THROWS(GetProperty(env, argv[2], "column", column));
+
+  std::vector<rocksdb::Slice> keys{count};
+  std::vector<rocksdb::Status> statuses{count};
+  std::vector<rocksdb::PinnableSlice> values{count};
+
+  for (uint32_t n = 0; n < count; n++) {
+    napi_value element;
+    NAPI_STATUS_THROWS(napi_get_element(env, argv[1], n, &element));
+    NAPI_STATUS_THROWS(GetValue(env, element, keys[n]));
+  }
+
+  struct State {
+    std::vector<uint8_t> data;
+    std::vector<int32_t> sizes;
+  } state;
+
+  rocksdb::ReadOptions readOptions;
+  readOptions.fill_cache = fillCache;
+  readOptions.async_io = true;
+  readOptions.ignore_range_deletions = ignoreRangeDeletions;
+  readOptions.optimize_multiget_for_io = true;
+
+  database->db->MultiGet(readOptions, column, count, keys.data(), values.data(), statuses.data());
+
+  auto size = 0;
+  for (auto n = 0; n < count; n++) {
+    const auto valueSize = values[n].size();
+    size += valueSize & 0x7 ? (valueSize | 0x7) + 1 : valueSize;
+  }
+
+  state.data.reserve(size);
+
+  auto push = [&](rocksdb::Slice* slice){
+    if (slice) {
+      state.sizes.push_back(static_cast<int32_t>(slice->size()));
+      std::copy_n(slice->data(), slice->size(), std::back_inserter(state.data));
+
+      if (state.data.size() & 0x7) {
+        state.data.resize((state.data.size() | 0x7) + 1);
+      }
+    } else {
+      state.sizes.push_back(-1);
+    }
+  };
+
+  for (auto n = 0; n < count; n++) {
+    push(statuses[n].ok() ? &values[n] : nullptr);
+  }
+
+  napi_value sizes;
+  if (state.sizes.size() > 0) {
+    auto sizes_ptr = std::make_unique<std::vector<int32_t>>(std::move(state.sizes));
+    NAPI_STATUS_THROWS(napi_create_external_buffer(env, sizes_ptr->size() * 4, sizes_ptr->data(), Finalize<std::vector<int32_t>>, sizes_ptr.get(), &sizes));
+    sizes_ptr.release();
+  } else {
+    NAPI_STATUS_THROWS(napi_get_undefined(env, &sizes));
+  }
+
+  napi_value data;
+  if (state.data.size() > 0) {
+    auto data_ptr = std::make_unique<std::vector<uint8_t>>(std::move(state.data));
+    NAPI_STATUS_THROWS(napi_create_external_buffer(env, data_ptr->size(), data_ptr->data(), Finalize<std::vector<uint8_t>>, data_ptr.get(), &data));
+    data_ptr.release();
+  } else {
+    NAPI_STATUS_THROWS(napi_get_undefined(env, &data));
+  }
+
+  napi_value result;
+  NAPI_STATUS_THROWS(napi_create_array_with_length(env, 2, &result));
+  NAPI_STATUS_THROWS(napi_set_element(env, result, 0, sizes));
+  NAPI_STATUS_THROWS(napi_set_element(env, result, 1, data));
+  return result;
+}
+
 NAPI_METHOD(db_clear) {
   NAPI_ARGV(2);
 
@@ -1507,8 +1572,6 @@ NAPI_METHOD(batch_clear) {
 NAPI_METHOD(batch_write) {
   NAPI_ARGV(4);
 
-  napi_value result = 0;
-
   Database* database;
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
 
@@ -1534,7 +1597,30 @@ NAPI_METHOD(batch_write) {
       },
       [=](int64_t& seq, auto env, auto& argv) { return napi_ok; });
 
-  return result;
+  return 0;
+}
+
+NAPI_METHOD(batch_write_sync) {
+  NAPI_ARGV(3);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  rocksdb::WriteBatch* batch;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));
+
+  auto options = argv[2];
+
+  bool sync = false;
+  NAPI_STATUS_THROWS(GetProperty(env, options, "sync", sync));
+
+  bool lowPriority = false;
+  NAPI_STATUS_THROWS(GetProperty(env, options, "lowPriority", lowPriority));
+
+  rocksdb::WriteOptions writeOptions;
+  writeOptions.sync = sync;
+  writeOptions.low_pri = lowPriority;
+  ROCKS_STATUS_THROWS_NAPI(database->db->Write(writeOptions, batch));
 }
 
 NAPI_METHOD(batch_count) {
@@ -1594,6 +1680,7 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(db_get_location);
   NAPI_EXPORT_FUNCTION(db_close);
   NAPI_EXPORT_FUNCTION(db_get_many);
+  NAPI_EXPORT_FUNCTION(db_get_many_sync);
   NAPI_EXPORT_FUNCTION(db_clear);
   NAPI_EXPORT_FUNCTION(db_get_property);
   NAPI_EXPORT_FUNCTION(db_get_latest_sequence);
@@ -1609,6 +1696,7 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(batch_del);
   NAPI_EXPORT_FUNCTION(batch_clear);
   NAPI_EXPORT_FUNCTION(batch_write);
+  NAPI_EXPORT_FUNCTION(batch_write_sync);
   NAPI_EXPORT_FUNCTION(batch_merge);
   NAPI_EXPORT_FUNCTION(batch_count);
   NAPI_EXPORT_FUNCTION(batch_iterate);
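
The new db_get_many_sync returns a two-element array: an Int32 buffer of per-key value sizes (-1 marking a missing key) and a single data buffer in which each value is padded out to the next 8-byte boundary. A minimal JavaScript decoding sketch, assuming `context` and `keys` are placeholders already set up for the binding and that at least one key was found (the binding returns undefined instead of empty buffers):

    const [sizes, data] = binding.db_get_many_sync(context, keys, {})
    const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
    const values = []
    let offset = 0
    for (const size of sizes32) {
      if (size < 0) {
        values.push(undefined) // key not found
      } else {
        values.push(data.subarray(offset, offset + size))
        offset += size
        if (offset & 0x7) offset = (offset | 0x7) + 1 // skip the 8-byte alignment padding
      }
    }
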
package/chained-batch.js CHANGED
@@ -59,14 +59,20 @@ class ChainedBatch extends AbstractChainedBatch {
   _write (options, callback) {
     callback = fromCallback(callback, kPromise)
 
-    // NOTE: `this` needs to be referenced until callback is called
-    this[kWrite](this, this[kBatchContext], options ?? EMPTY, (err) => {
-      callback(err, null, this)
-    })
+    try {
+      binding.batch_write_sync(this[kDbContext], this[kBatchContext], options)
+      process.nextTick(callback)
+    } catch (err) {
+      process.nextTick(callback, err)
+    }
 
     return callback[kPromise]
   }
 
+  _writeSync (options, callback) {
+    binding.batch_write_sync(this[kDbContext], this[kBatchContext], options)
+  }
+
   _close (callback) {
     process.nextTick(callback)
   }
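
With this change _write performs the RocksDB write synchronously on the JS thread and only defers the callback to the next tick, so a caller sees the same API as before. A hypothetical usage sketch (`db`, the key and the value are placeholders; `sync` and `lowPriority` are the options batch_write_sync reads, per the binding.cc hunk above):

    const batch = db.batch()
    batch.put('key', 'value')
    // The write itself happens synchronously inside batch_write_sync;
    // the returned promise still resolves on the next tick.
    await batch.write({ sync: false, lowPriority: true })
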
package/index.js CHANGED
@@ -8,8 +8,8 @@ const { ChainedBatch } = require('./chained-batch')
 const { Iterator } = require('./iterator')
 const fs = require('node:fs')
 const assert = require('node:assert')
-const { handleNextv } = require('./util')
-const FastBuffer = Buffer[Symbol.species]
+const { handleNextv, handleMany } = require('./util')
+
 const kContext = Symbol('context')
 const kColumns = Symbol('columns')
 const kPromise = Symbol('promise')
@@ -152,47 +152,29 @@ class RocksLevel extends AbstractLevel {
     return callback[kPromise]
   }
 
+  _getManySync (keys, options) {
+    keys = keys.map(key => Buffer.from(key))
+    const ret = binding.db_get_many_sync(this[kContext], keys, options ?? EMPTY)
+    const [sizes, data] = ret
+    return handleMany(sizes, data, options ?? EMPTY)
+  }
+
   _getMany (keys, options, callback) {
     callback = fromCallback(callback, kPromise)
 
-    const { valueEncoding } = options ?? EMPTY
     try {
       this[kRef]()
-
-      function bufferHandler (sizes, buffer) {
-        buffer ??= Buffer.alloc(0)
-        sizes ??= Buffer.alloc(0)
-
-        let offset = 0
-        const rows = []
-        const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
-        for (let n = 0; n < sizes32.length; n++) {
-          const size = sizes32[n]
-          const encoding = valueEncoding
-          if (size < 0) {
-            rows.push(undefined)
-          } else {
-            if (encoding === 'slice') {
-              rows.push({ buffer, byteOffset: offset, byteLength: size })
-            } else {
-              rows.push(new FastBuffer(buffer.buffer, offset, size))
-            }
-            offset += size
-            if (offset & 0x7) {
-              offset = (offset | 0x7) + 1
-            }
-          }
-        }
-        callback(null, rows)
-      }
-
-      binding.db_get_many(this[kContext], keys, options ?? EMPTY, (err, arg1, arg2) => {
+      binding.db_get_many(this[kContext], keys, options ?? EMPTY, (err, sizes, data) => {
         if (err) {
           callback(err)
-        } else if (valueEncoding === 'utf8') {
-          callback(null, arg1)
         } else {
-          bufferHandler(arg1, arg2)
+          let rows
+          try {
+            rows = handleMany(sizes, data, options ?? EMPTY)
+          } catch (err) {
+            callback(err)
+          }
+          callback(null, rows)
         }
         this[kUnref]()
       })
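
A hypothetical use of the new synchronous read path, assuming the surrounding abstract-level base class exposes a public getManySync() that delegates to _getManySync() (that wrapper is not shown in this diff):

    // Keys are coerced to Buffers before hitting the binding; values come
    // back decoded by handleMany() according to valueEncoding.
    const values = db.getManySync(['a', 'b', 'missing'], { valueEncoding: 'buffer' })
    // -> [Buffer, Buffer, undefined]
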
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@nxtedition/rocksdb",
-  "version": "10.3.3",
+  "version": "10.3.5",
   "description": "A low-level Node.js RocksDB binding",
   "license": "MIT",
   "main": "index.js",
package/util.js CHANGED
@@ -1,5 +1,36 @@
 'use strict'
 
+function handleMany (sizes, data, options) {
+  const { valueEncoding } = options ?? {}
+
+  data ??= Buffer.alloc(0)
+  sizes ??= Buffer.alloc(0)
+
+  const rows = []
+  let offset = 0
+  const sizes32 = new Int32Array(sizes.buffer, sizes.byteOffset, sizes.byteLength / 4)
+  for (let n = 0; n < sizes32.length; n++) {
+    const size = sizes32[n]
+    const encoding = valueEncoding
+    if (size < 0) {
+      rows.push(undefined)
+    } else {
+      if (!encoding || encoding === 'buffer') {
+        rows.push(data.subarray(offset, offset + size))
+      } else if (encoding === 'slice') {
+        rows.push({ buffer: data, byteOffset: offset, byteLength: size })
+      } else {
+        rows.push(data.toString(encoding, offset, offset + size))
+      }
+      offset += size
+      if (offset & 0x7) {
+        offset = (offset | 0x7) + 1
+      }
+    }
+  }
+
+  return rows
+}
 function handleNextv (err, sizes, buffer, finished, options, callback) {
   const { keyEncoding, valueEncoding } = options ?? {}
 
@@ -36,4 +67,5 @@ function handleNextv (err, sizes, buffer, finished, options, callback) {
     }
   }
 
+exports.handleMany = handleMany
 exports.handleNextv = handleNextv
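
A worked example of the layout handleMany() decodes, with buffers shaped like the ones the binding produces: a 3-byte value, a missing key (-1), then a 5-byte value that starts at the next 8-byte boundary (offset 8):

    const { handleMany } = require('./util')
    const sizes = Buffer.from(new Int32Array([3, -1, 5]).buffer)
    const data = Buffer.concat([Buffer.from('foo'), Buffer.alloc(5), Buffer.from('hello')])
    handleMany(sizes, data, {})
    // -> [<Buffer 66 6f 6f>, undefined, <Buffer 68 65 6c 6c 6f>]
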