@nxtedition/rocksdb 15.2.6 → 15.4.0

@@ -28,7 +28,8 @@
     "Bash(git rm:*)",
     "WebFetch(domain:raw.githubusercontent.com)",
     "Bash(git log:*)",
-    "WebFetch(domain:github.com)"
+    "WebFetch(domain:github.com)",
+    "Bash(npm run rebuild:*)"
   ]
  }
 }
package/binding.cc CHANGED
@@ -812,7 +812,7 @@ static void FinalizeDatabase(napi_env env, void* data, void* hint) {
 }
 
 NAPI_METHOD(db_init) {
-  NAPI_ARGV(1);
+  NAPI_ARGV(2);
 
   Database* database = nullptr;
 
@@ -1024,7 +1024,35 @@ napi_status InitOptions(napi_env env, T& columnOptions, const U& options) {
   tableOptions.decouple_partitioned_filters = true;
 
   std::shared_ptr<rocksdb::Cache> cache;
+
   {
+    napi_value cacheValue;
+    NAPI_STATUS_RETURN(napi_get_named_property(env, options, "cache", &cacheValue));
+
+    napi_valuetype cacheType;
+    NAPI_STATUS_RETURN(napi_typeof(env, cacheValue, &cacheType));
+
+    if (cacheType == napi_object) {
+      napi_value handleValue;
+      NAPI_STATUS_RETURN(napi_get_named_property(env, cacheValue, "handle", &handleValue));
+
+      bool lossless;
+      int64_t ptr;
+      NAPI_STATUS_RETURN(napi_get_value_bigint_int64(env, handleValue, &ptr, &lossless));
+
+      cache = *reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(ptr);
+    } else if (cacheType == napi_bigint) {
+      bool lossless;
+      int64_t ptr;
+      NAPI_STATUS_RETURN(napi_get_value_bigint_int64(env, cacheValue, &ptr, &lossless));
+
+      cache = *reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(ptr);
+    } else if (cacheType != napi_undefined && cacheType != napi_null) {
+      return napi_invalid_arg;
+    }
+  }
+
+  if (!cache) {
     uint32_t cacheSize = 8 << 20;
     double compressedRatio = 0.0;
 
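In plain terms, InitOptions now accepts a pre-built block cache under options.cache in either of two shapes, and only falls back to building its own per-database cache when none is supplied. A hedged sketch of the accepted values (RocksCache is the wrapper added in package/cache.js below; the capacity value is illustrative):

'use strict'

const { RocksCache } = require('@nxtedition/rocksdb')

const cache = new RocksCache({ capacity: 64 * 1024 * 1024 })

// Object form: InitOptions reads the object's BigInt `handle` property.
const optionsA = { cache }

// BigInt form: the raw handle, i.e. the address of the underlying
// std::shared_ptr<rocksdb::Cache>.
const optionsB = { cache: cache.handle }

// Any other non-null/undefined type is rejected with napi_invalid_arg.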
@@ -1032,7 +1060,7 @@ napi_status InitOptions(napi_env env, T& columnOptions, const U& options) {
     NAPI_STATUS_RETURN(GetProperty(env, options, "cacheCompressedRatio", compressedRatio));
 
     if (cacheSize == 0) {
-      cache = nullptr;
+      // Do nothing...
     } else if (compressedRatio > 0.0) {
       rocksdb::TieredCacheOptions options;
       options.cache_type = rocksdb::PrimaryCacheType::kCacheTypeHCC;
@@ -1960,7 +1988,6 @@ NAPI_METHOD(batch_write) {
 
   rocksdb::WriteBatch* batch;
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], reinterpret_cast<void**>(&batch)));
-
   bool sync = false;
   NAPI_STATUS_THROWS(GetProperty(env, argv[2], "sync", sync));
 
@@ -2249,6 +2276,32 @@ NAPI_METHOD(db_compact_range) {
   return 0;
 }
 
+NAPI_METHOD(cache_init) {
+  NAPI_ARGV(1);
+
+  size_t capacity = 32 * 1024 * 1024; // 32 MiB
+  NAPI_STATUS_THROWS(GetProperty(env, argv[0], "capacity", capacity));
+
+  auto cache = new std::shared_ptr<rocksdb::Cache>(rocksdb::HyperClockCacheOptions(capacity, 0).MakeSharedCache());
+
+  napi_value result;
+  NAPI_STATUS_THROWS(napi_create_external(env, cache, Finalize<std::shared_ptr<rocksdb::Cache>>, cache, &result));
+
+  return result;
+}
+
+NAPI_METHOD(cache_get_handle) {
+  NAPI_ARGV(1);
+
+  std::shared_ptr<rocksdb::Cache>* cache;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&cache)));
+
+  napi_value result;
+  NAPI_STATUS_THROWS(napi_create_bigint_int64(env, reinterpret_cast<intptr_t>(cache), &result));
+
+  return result;
+}
+
 NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(db_init);
   NAPI_EXPORT_FUNCTION(db_open);
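At the binding level the two new functions form a round trip: cache_init heap-allocates a std::shared_ptr<rocksdb::Cache> (HyperClock, 32 MiB by default) inside an external whose finalizer deletes it, and cache_get_handle exposes that shared_ptr's address as a BigInt, which InitOptions dereferences above. A minimal sketch of driving the binding directly; in practice the RocksCache wrapper below does exactly this:

'use strict'

const binding = require('./binding')

// `capacity` is the only option cache_init reads in this diff.
const external = binding.cache_init({ capacity: 64 * 1024 * 1024 })

// The BigInt address of the heap-allocated std::shared_ptr<rocksdb::Cache>.
const handle = binding.cache_get_handle(external)

// The handle is only meaningful while `external` stays reachable: its
// finalizer deletes the shared_ptr.
console.log(typeof handle) // 'bigint'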
@@ -2287,4 +2340,7 @@ NAPI_INIT() {
2287
2340
  NAPI_EXPORT_FUNCTION(batch_merge);
2288
2341
  NAPI_EXPORT_FUNCTION(batch_count);
2289
2342
  NAPI_EXPORT_FUNCTION(batch_iterate);
2343
+
2344
+ NAPI_EXPORT_FUNCTION(cache_init);
2345
+ NAPI_EXPORT_FUNCTION(cache_get_handle);
2290
2346
  }
package/cache.js ADDED
@@ -0,0 +1,17 @@
+'use strict'
+
+const binding = require('./binding')
+
+const kCacheContext = Symbol('cacheContext')
+
+class RocksCache {
+  constructor(optionsOrHandle = {}) {
+    this[kCacheContext] = binding.cache_init(optionsOrHandle)
+  }
+
+  get handle () {
+    return binding.cache_get_handle(this[kCacheContext])
+  }
+}
+
+exports.RocksCache = RocksCache
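The evident use case is sharing one block cache across several database instances. A hedged usage sketch, assuming RocksLevel forwards its constructor options to the native InitOptions shown above (that wiring is not part of this diff; paths and sizes are illustrative):

'use strict'

const { RocksLevel, RocksCache } = require('@nxtedition/rocksdb')

// One HyperClock cache shared by two databases.
const cache = new RocksCache({ capacity: 256 * 1024 * 1024 })

const users = new RocksLevel('./users', { cache })
const events = new RocksLevel('./events', { cache: cache.handle })

// Keep `cache` referenced while either database is open: the handle is a raw
// pointer whose target is freed when the RocksCache's external is collected.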
package/chained-batch.js CHANGED
@@ -1,12 +1,17 @@
 'use strict'
 
+const { fromCallback } = require('catering')
 const { AbstractChainedBatch } = require('abstract-level')
-const binding = require('./binding')
 const ModuleError = require('module-error')
 const assert = require('node:assert')
 
+const binding = require('./binding')
+
+const kPromise = Symbol('promise')
 const kBatchContext = Symbol('batchContext')
 const kDbContext = Symbol('dbContext')
+const kBusy = Symbol('busy')
+
 const EMPTY = {}
 
 class ChainedBatch extends AbstractChainedBatch {
@@ -15,10 +20,18 @@ class ChainedBatch extends AbstractChainedBatch {
 
     this[kDbContext] = context
     this[kBatchContext] = binding.batch_init()
+    this[kBusy] = false
+  }
+
+  get length () {
+    assert(this[kBatchContext])
+
+    return binding.batch_count(this[kBatchContext])
   }
 
   _put (key, value, options) {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
     if (key === null || key === undefined) {
       throw new ModuleError('Key cannot be null or undefined', {
@@ -40,6 +53,7 @@ class ChainedBatch extends AbstractChainedBatch {
 
   _putLogData (blob) {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
     if (blob === null || blob === undefined) {
       throw new ModuleError('Blob cannot be null or undefined', {
@@ -54,6 +68,7 @@ class ChainedBatch extends AbstractChainedBatch {
 
   _del (key, options) {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
     if (key === null || key === undefined) {
       throw new ModuleError('Key cannot be null or undefined', {
@@ -68,24 +83,43 @@ class ChainedBatch extends AbstractChainedBatch {
 
   _clear () {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
     binding.batch_clear(this[kBatchContext])
   }
 
   _write (options, callback) {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
-    binding.batch_write(this[kDbContext], this[kBatchContext], options ?? EMPTY, callback)
+    return this._writeAsync(options, callback)
   }
 
   _writeSync (options) {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
     binding.batch_write_sync(this[kDbContext], this[kBatchContext], options ?? EMPTY)
   }
 
+  _writeAsync (options, callback) {
+    assert(this[kBatchContext])
+    assert(!this[kBusy])
+
+    callback = fromCallback(callback, kPromise)
+
+    this[kBusy] = true
+    binding.batch_write(this[kDbContext], this[kBatchContext], options ?? EMPTY, (err) => {
+      this[kBusy] = false
+      callback(err)
+    })
+
+    return callback[kPromise]
+  }
+
   _close (callback) {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
     try {
       this._closeSync()
@@ -96,20 +130,16 @@ class ChainedBatch extends AbstractChainedBatch {
   }
 
   _closeSync () {
-    if (this[kBatchContext]) {
-      binding.batch_clear(this[kBatchContext])
-      this[kBatchContext] = null
-    }
-  }
-
-  get length () {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
-    return binding.batch_count(this[kBatchContext])
+    binding.batch_clear(this[kBatchContext])
+    this[kBatchContext] = null
   }
 
   _merge (key, value, options) {
     assert(this[kBatchContext])
+    assert(!this[kBusy])
 
     if (key === null || key === undefined) {
       throw new ModuleError('Key cannot be null or undefined', {
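Net effect of the batch changes: _write now delegates to _writeAsync, which wraps the callback with catering's fromCallback so that omitting it yields a promise, and the kBusy flag held across the native batch_write lets every mutator assert that the batch is not mid-write. A hedged sketch of the resulting behaviour through abstract-level's public batch API (path illustrative):

'use strict'

const { RocksLevel } = require('@nxtedition/rocksdb')

async function main () {
  const db = new RocksLevel('./db')
  await db.open()

  const batch = db.batch()
  batch.put('a', '1')
  batch.put('b', '2')

  // With no callback, _writeAsync returns a promise via fromCallback.
  const pending = batch.write()

  // Mutating the batch here would be rejected: the kBusy guard asserts
  // while the native batch_write is in flight.

  await pending
  await db.close()
}

main().catch(console.error)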
package/index.js CHANGED
@@ -5,6 +5,7 @@ const { AbstractLevel } = require('abstract-level')
 const ModuleError = require('module-error')
 const binding = require('./binding')
 const { ChainedBatch } = require('./chained-batch')
+const { RocksCache } = require('./cache')
 const { Iterator } = require('./iterator')
 const fs = require('node:fs')
 const assert = require('node:assert')
@@ -20,7 +21,7 @@ const { kRef, kUnref } = require('./util')
 const kEmpty = {}
 
 class RocksLevel extends AbstractLevel {
-  constructor (locationOrHandle, options) {
+  constructor (locationOrHandle, { ...options } = {}) {
     super({
       encodings: {
         buffer: true,
@@ -339,3 +340,4 @@ class RocksLevel extends AbstractLevel {
 }
 
 exports.RocksLevel = RocksLevel
+exports.RocksCache = RocksCache
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@nxtedition/rocksdb",
-  "version": "15.2.6",
+  "version": "15.4.0",
   "description": "A low-level Node.js RocksDB binding",
   "license": "MIT",
   "main": "index.js",