@nxtedition/rocksdb 9.0.1 → 10.0.0

package/binding.cc CHANGED
@@ -40,8 +40,6 @@ struct Database;
  struct Iterator;

  struct ColumnFamily {
- napi_ref ref;
- napi_value val;
  rocksdb::ColumnFamilyHandle* handle;
  rocksdb::ColumnFamilyDescriptor descriptor;
  };
@@ -52,9 +50,12 @@ struct Closable {
  };

  struct Database final {
+ Database(std::string location) : location(std::move(location)) {}
  ~Database() { assert(!db); }

  rocksdb::Status Close() {
+ std::lock_guard<std::mutex> lock(mutex);
+
  if (!db) {
  return rocksdb::Status::OK();
  }
@@ -64,17 +65,35 @@ struct Database final {
  }
  closables.clear();

- for (auto& [id, column] : columns) {
- db->DestroyColumnFamilyHandle(column.handle);
- }
- columns.clear();
-
  db->FlushWAL(true);

- auto db2 = std::move(db);
- return db2->Close();
+ for (auto& [id, column] : columns) {
+ db->DestroyColumnFamilyHandle(column.handle);
+ }
+ columns.clear();
+
+ auto db2 = std::move(db);
+ return db2->Close();
  }

+ void Ref() {
+ std::lock_guard<std::mutex> lock(mutex);
+
+ refs++;
+ }
+
+ void Unref() {
+ std::lock_guard<std::mutex> lock(mutex);
+
+ if (--refs == 0) {
+ Close();
+ delete this;
+ }
+ }
+
+ int refs = 0;
+ std::mutex mutex;
+ std::string location;
  std::unique_ptr<rocksdb::DB> db;
  std::set<Closable*> closables;
  std::map<int32_t, ColumnFamily> columns;
@@ -449,32 +468,74 @@ struct Iterator final : public BaseIterator {
  napi_ref ref_ = nullptr;
  };

- static void env_cleanup_hook(void* arg) {
- auto database = reinterpret_cast<Database*>(arg);
- if (database) {
- database->Close();
- }
- }
-
  static void FinalizeDatabase(napi_env env, void* data, void* hint) {
  if (data) {
- auto database = reinterpret_cast<Database*>(data);
- database->Close();
- napi_remove_env_cleanup_hook(env, env_cleanup_hook, database);
- for (auto& [id, column] : database->columns) {
- napi_delete_reference(env, column.ref);
- }
- delete database;
+ auto database = reinterpret_cast<Database*>(data);
+
+ std::lock_guard<std::mutex> lock(database->mutex);
+
+ database->Unref();
  }
  }

  NAPI_METHOD(db_init) {
- auto database = new Database();
- napi_add_env_cleanup_hook(env, env_cleanup_hook, database);
+ NAPI_ARGV(1);
+
+ Database* database = nullptr;
+
+ napi_valuetype type;
+ NAPI_STATUS_THROWS(napi_typeof(env, argv[0], &type));
+
+ if (type == napi_string) {
+ std::string location;
+ size_t length = 0;
+ NAPI_STATUS_THROWS(napi_get_value_string_utf8(env, argv[0], nullptr, 0, &length));
+ location.resize(length, '\0');
+ NAPI_STATUS_THROWS(napi_get_value_string_utf8(env, argv[0], &location[0], length + 1, &length));
+
+ database = new Database(location);
+ } else if (type == napi_bigint) {
+ int64_t value;
+ bool lossless;
+ NAPI_STATUS_THROWS(napi_get_value_bigint_int64(env, argv[0], &value, &lossless));
+
+ database = reinterpret_cast<Database*>(value);
+ } else {
+ NAPI_STATUS_THROWS(napi_invalid_arg);
+ }

  napi_value result;
  NAPI_STATUS_THROWS(napi_create_external(env, database, FinalizeDatabase, nullptr, &result));

+ {
+ std::lock_guard<std::mutex> lock(database->mutex);
+ database->Ref();
+ }
+
+ return result;
+ }
+
+ NAPI_METHOD(db_get_handle) {
+ NAPI_ARGV(1);
+
+ Database* database;
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+ napi_value result;
+ NAPI_STATUS_THROWS(napi_create_bigint_int64(env, reinterpret_cast<intptr_t>(database), &result));
+
+ return result;
+ }
+
+ NAPI_METHOD(db_get_location) {
+ NAPI_ARGV(1);
+
+ Database* database;
+ NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+ napi_value result;
+ NAPI_STATUS_THROWS(Convert(env, &database->location, Encoding::String, result));
+
  return result;
  }
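The new surface above replaces the env cleanup hook with a refcounted `Database`: `db_init` either allocates a fresh instance from a location string or attaches to an existing one through the BigInt-encoded pointer returned by `db_get_handle`, and each external drops a reference in `FinalizeDatabase`. A minimal JavaScript sketch of how this could be driven, assuming the addon is loaded the way `index.js` loads it (`require('./binding')`); the path is illustrative:

```js
const binding = require('./binding')

// Allocate a fresh refcounted Database from a location string.
const ctx = binding.db_init('/tmp/example-db')

// Export the native pointer as a BigInt and attach a second external to the
// same Database; each external's finalizer later drops one reference.
const handle = binding.db_get_handle(ctx)
const sharedCtx = binding.db_init(handle)

// The location now lives on the native side and can be read back.
console.log(binding.db_get_location(sharedCtx)) // '/tmp/example-db'
```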
 
@@ -710,141 +771,164 @@ NAPI_METHOD(db_get_identity) {
  }

  NAPI_METHOD(db_open) {
- NAPI_ARGV(4);
+ NAPI_ARGV(3);

  Database* database;
  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));

- std::string location;
- NAPI_STATUS_THROWS(GetValue(env, argv[1], location));
-
- rocksdb::Options dbOptions;
-
- const auto options = argv[2];
-
- int parallelism = std::max<int>(1, std::thread::hardware_concurrency() / 2);
- NAPI_STATUS_THROWS(GetProperty(env, options, "parallelism", parallelism));
- dbOptions.IncreaseParallelism(parallelism);
-
- uint32_t walTTL = 0;
- NAPI_STATUS_THROWS(GetProperty(env, options, "walTTL", walTTL));
- dbOptions.WAL_ttl_seconds = walTTL / 1e3;
-
- uint32_t walSizeLimit = 0;
- NAPI_STATUS_THROWS(GetProperty(env, options, "walSizeLimit", walSizeLimit));
- dbOptions.WAL_size_limit_MB = walSizeLimit / 1e6;
-
- uint32_t maxTotalWalSize = 0;
- NAPI_STATUS_THROWS(GetProperty(env, options, "walTotalSizeLimit", walSizeLimit));
- dbOptions.max_total_wal_size = maxTotalWalSize / 1e6;
-
- bool walCompression = false;
- NAPI_STATUS_THROWS(GetProperty(env, options, "walCompression", walCompression));
- dbOptions.wal_compression =
- walCompression ? rocksdb::CompressionType::kZSTD : rocksdb::CompressionType::kNoCompression;
-
- dbOptions.avoid_unnecessary_blocking_io = true;
- dbOptions.write_dbid_to_manifest = true;
- dbOptions.use_adaptive_mutex = true; // We don't have soo many threads in the libuv thread pool...
- dbOptions.enable_pipelined_write = true; // We only write in the main thread...
- dbOptions.create_missing_column_families = true;
- dbOptions.fail_if_options_file_error = true;
-
- NAPI_STATUS_THROWS(GetProperty(env, options, "createIfMissing", dbOptions.create_if_missing));
- NAPI_STATUS_THROWS(GetProperty(env, options, "errorIfExists", dbOptions.error_if_exists));
- NAPI_STATUS_THROWS(GetProperty(env, options, "unorderedWrite", dbOptions.unordered_write));
- NAPI_STATUS_THROWS(GetProperty(env, options, "pipelinedWrite", dbOptions.enable_pipelined_write));
-
- // TODO (feat): dbOptions.listeners
-
- std::string infoLogLevel;
- NAPI_STATUS_THROWS(GetProperty(env, options, "infoLogLevel", infoLogLevel));
- if (infoLogLevel.size() > 0) {
- rocksdb::InfoLogLevel lvl = {};
-
- if (infoLogLevel == "debug")
- lvl = rocksdb::InfoLogLevel::DEBUG_LEVEL;
- else if (infoLogLevel == "info")
- lvl = rocksdb::InfoLogLevel::INFO_LEVEL;
- else if (infoLogLevel == "warn")
- lvl = rocksdb::InfoLogLevel::WARN_LEVEL;
- else if (infoLogLevel == "error")
- lvl = rocksdb::InfoLogLevel::ERROR_LEVEL;
- else if (infoLogLevel == "fatal")
- lvl = rocksdb::InfoLogLevel::FATAL_LEVEL;
- else if (infoLogLevel == "header")
- lvl = rocksdb::InfoLogLevel::HEADER_LEVEL;
- else
- napi_throw_error(env, nullptr, "invalid log level");
-
- dbOptions.info_log_level = lvl;
- } else {
- // In some places RocksDB checks this option to see if it should prepare
- // debug information (ahead of logging), so set it to the highest level.
- dbOptions.info_log_level = rocksdb::InfoLogLevel::HEADER_LEVEL;
- dbOptions.info_log.reset(new NullLogger());
- }
-
- NAPI_STATUS_THROWS(InitOptions(env, dbOptions, options));
-
- std::vector<rocksdb::ColumnFamilyDescriptor> descriptors;
-
- bool hasColumns;
- NAPI_STATUS_THROWS(napi_has_named_property(env, options, "columns", &hasColumns));
-
- if (hasColumns) {
- napi_value columns;
- NAPI_STATUS_THROWS(napi_get_named_property(env, options, "columns", &columns));
-
- napi_value keys;
- NAPI_STATUS_THROWS(napi_get_property_names(env, columns, &keys));
-
- uint32_t len;
- NAPI_STATUS_THROWS(napi_get_array_length(env, keys, &len));
-
- descriptors.resize(len);
- for (uint32_t n = 0; n < len; ++n) {
- napi_value key;
- NAPI_STATUS_THROWS(napi_get_element(env, keys, n, &key));
-
- napi_value column;
- NAPI_STATUS_THROWS(napi_get_property(env, columns, key, &column));
-
- NAPI_STATUS_THROWS(InitOptions(env, descriptors[n].options, column));
-
- NAPI_STATUS_THROWS(GetValue(env, key, descriptors[n].name));
- }
- }
-
- auto callback = argv[3];
-
- runAsync<std::vector<rocksdb::ColumnFamilyHandle*>>(
- "leveldown.open", env, callback,
- [=](auto& handles) {
- rocksdb::DB* db = nullptr;
- const auto status = descriptors.empty() ? rocksdb::DB::Open(dbOptions, location, &db)
- : rocksdb::DB::Open(dbOptions, location, descriptors, &handles, &db);
- database->db.reset(db);
- return status;
- },
- [=](auto& handles, auto env, auto& argv) {
- argv.resize(2);
-
- const auto size = handles.size();
- NAPI_STATUS_RETURN(napi_create_object(env, &argv[1]));
-
- for (size_t n = 0; n < size; ++n) {
- ColumnFamily column;
- column.handle = handles[n];
- column.descriptor = descriptors[n];
- NAPI_STATUS_RETURN(napi_create_external(env, column.handle, nullptr, nullptr, &column.val));
- NAPI_STATUS_RETURN(napi_create_reference(env, column.val, 1, &column.ref));
- NAPI_STATUS_RETURN(napi_set_named_property(env, argv[1], descriptors[n].name.c_str(), column.val));
- database->columns[column.handle->GetID()] = column;
- }
-
- return napi_ok;
- });
+ std::lock_guard<std::mutex> lock(database->mutex);
+
+ if (database->db) {
+ napi_value columns;
+ NAPI_STATUS_THROWS(napi_create_object(env, &columns));
+ for (auto& [id, column] : database->columns) {
+ napi_value val;
+ NAPI_STATUS_THROWS(napi_create_external(env, column.handle, nullptr, nullptr, &val));
+ NAPI_STATUS_THROWS(napi_set_named_property(env, columns, column.descriptor.name.c_str(), val));
+ }
+ return columns;
+ } else {
+ rocksdb::Options dbOptions;
+
+ const auto options = argv[1];
+
+ int parallelism = std::max<int>(1, std::thread::hardware_concurrency() / 2);
+ NAPI_STATUS_THROWS(GetProperty(env, options, "parallelism", parallelism));
+ dbOptions.IncreaseParallelism(parallelism);
+
+ uint32_t walTTL = 0;
+ NAPI_STATUS_THROWS(GetProperty(env, options, "walTTL", walTTL));
+ dbOptions.WAL_ttl_seconds = walTTL / 1e3;
+
+ uint32_t walSizeLimit = 0;
+ NAPI_STATUS_THROWS(GetProperty(env, options, "walSizeLimit", walSizeLimit));
+ dbOptions.WAL_size_limit_MB = walSizeLimit / 1e6;
+
+ uint32_t maxTotalWalSize = 0;
+ NAPI_STATUS_THROWS(GetProperty(env, options, "walTotalSizeLimit", walSizeLimit));
+ dbOptions.max_total_wal_size = maxTotalWalSize / 1e6;
+
+ bool walCompression = false;
+ NAPI_STATUS_THROWS(GetProperty(env, options, "walCompression", walCompression));
+ dbOptions.wal_compression =
+ walCompression ? rocksdb::CompressionType::kZSTD : rocksdb::CompressionType::kNoCompression;
+
+ dbOptions.avoid_unnecessary_blocking_io = true;
+ dbOptions.write_dbid_to_manifest = true;
+ dbOptions.enable_pipelined_write = true; // We only write in the main thread...
+ dbOptions.create_missing_column_families = true;
+ dbOptions.fail_if_options_file_error = true;
+
+ NAPI_STATUS_THROWS(GetProperty(env, options, "createIfMissing", dbOptions.create_if_missing));
+ NAPI_STATUS_THROWS(GetProperty(env, options, "errorIfExists", dbOptions.error_if_exists));
+ NAPI_STATUS_THROWS(GetProperty(env, options, "pipelinedWrite", dbOptions.enable_pipelined_write));
+
+ // TODO (feat): dbOptions.listeners
+
+ std::string infoLogLevel;
+ NAPI_STATUS_THROWS(GetProperty(env, options, "infoLogLevel", infoLogLevel));
+ if (infoLogLevel.size() > 0) {
+ rocksdb::InfoLogLevel lvl = {};
+
+ if (infoLogLevel == "debug")
+ lvl = rocksdb::InfoLogLevel::DEBUG_LEVEL;
+ else if (infoLogLevel == "info")
+ lvl = rocksdb::InfoLogLevel::INFO_LEVEL;
+ else if (infoLogLevel == "warn")
+ lvl = rocksdb::InfoLogLevel::WARN_LEVEL;
+ else if (infoLogLevel == "error")
+ lvl = rocksdb::InfoLogLevel::ERROR_LEVEL;
+ else if (infoLogLevel == "fatal")
+ lvl = rocksdb::InfoLogLevel::FATAL_LEVEL;
+ else if (infoLogLevel == "header")
+ lvl = rocksdb::InfoLogLevel::HEADER_LEVEL;
+ else
+ napi_throw_error(env, nullptr, "invalid log level");
+
+ dbOptions.info_log_level = lvl;
+ } else {
+ // In some places RocksDB checks this option to see if it should prepare
+ // debug information (ahead of logging), so set it to the highest level.
+ dbOptions.info_log_level = rocksdb::InfoLogLevel::HEADER_LEVEL;
+ dbOptions.info_log.reset(new NullLogger());
+ }
+
+ NAPI_STATUS_THROWS(InitOptions(env, dbOptions, options));
+
+ std::vector<rocksdb::ColumnFamilyDescriptor> descriptors;
+
+ bool hasColumns;
+ NAPI_STATUS_THROWS(napi_has_named_property(env, options, "columns", &hasColumns));
+
+ if (hasColumns) {
+ napi_value columns;
+ NAPI_STATUS_THROWS(napi_get_named_property(env, options, "columns", &columns));
+
+ napi_value keys;
+ NAPI_STATUS_THROWS(napi_get_property_names(env, columns, &keys));
+
+ uint32_t len;
+ NAPI_STATUS_THROWS(napi_get_array_length(env, keys, &len));
+
+ descriptors.resize(len);
+ for (uint32_t n = 0; n < len; ++n) {
+ napi_value key;
+ NAPI_STATUS_THROWS(napi_get_element(env, keys, n, &key));
+
+ napi_value column;
+ NAPI_STATUS_THROWS(napi_get_property(env, columns, key, &column));
+
+ NAPI_STATUS_THROWS(InitOptions(env, descriptors[n].options, column));
+
+ NAPI_STATUS_THROWS(GetValue(env, key, descriptors[n].name));
+ }
+ }
+
+ auto callback = argv[2];
+
+ runAsync<std::vector<rocksdb::ColumnFamilyHandle*>>(
+ "leveldown.open", env, callback,
+ [=](auto& handles) {
+ assert(!database->db);
+
+ rocksdb::DB* db = nullptr;
+
+ // TODO (fix): There is a race condition here... Check if we are already opening the database.
+
+ const auto status = descriptors.empty()
+ ? rocksdb::DB::Open(dbOptions, database->location, &db)
+ : rocksdb::DB::Open(dbOptions, database->location, descriptors, &handles, &db);
+
+ {
+ std::lock_guard<std::mutex> lock(database->mutex);
+ database->db.reset(db);
+ }
+
+ return status;
+ },
+ [=](auto& handles, auto env, auto& argv) {
+ argv.resize(2);
+
+ NAPI_STATUS_RETURN(napi_create_object(env, &argv[1]));
+
+ for (size_t n = 0; n < handles.size(); ++n) {
+ ColumnFamily column;
+ column.handle = handles[n];
+ column.descriptor = descriptors[n];
+ database->columns[column.handle->GetID()] = column;
+ }
+
+ napi_value columns;
+ NAPI_STATUS_RETURN(napi_create_object(env, &columns));
+ for (auto& [id, column] : database->columns) {
+ napi_value val;
+ NAPI_STATUS_RETURN(napi_create_external(env, column.handle, nullptr, nullptr, &val));
+ NAPI_STATUS_RETURN(napi_set_named_property(env, columns, column.descriptor.name.c_str(), val));
+ }
+
+ return napi_ok;
+ });
+ }

  return 0;
  }
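Since the location now lives on the `Database` itself, `db_open` above drops to three arguments (`context`, `options`, `callback`) and becomes idempotent across externals that share one native instance: if the database is already open it returns the existing column handles synchronously, otherwise it opens RocksDB on the thread pool and reports through the callback. A rough sketch of the calling convention, with illustrative option values:

```js
const binding = require('./binding')

const ctx = binding.db_init('/tmp/example-db')

const columns = binding.db_open(ctx, { createIfMissing: true }, (err, cols) => {
  // Async path: this context is the first to open the database, so the
  // column family handles arrive here once rocksdb::DB::Open() finishes.
})

if (columns) {
  // Sync path: another context sharing the same native Database had already
  // opened it, so db_open returned its column handles immediately.
}
```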
@@ -859,6 +943,8 @@ NAPI_METHOD(db_close) {
  Database* database;
  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));

+ std::lock_guard<std::mutex> lock(database->mutex);
+
  auto callback = argv[1];

  struct State {};
@@ -1567,6 +1653,8 @@ NAPI_INIT() {
  NAPI_EXPORT_FUNCTION(db_init);
  NAPI_EXPORT_FUNCTION(db_open);
  NAPI_EXPORT_FUNCTION(db_get_identity);
+ NAPI_EXPORT_FUNCTION(db_get_handle);
+ NAPI_EXPORT_FUNCTION(db_get_location);
  NAPI_EXPORT_FUNCTION(db_close);
  NAPI_EXPORT_FUNCTION(db_get_many);
  NAPI_EXPORT_FUNCTION(db_clear);
@@ -1224,13 +1224,11 @@ TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdate) {
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, 0);

- ASSERT_NOK(UpdateTieredCache(tiered_cache, -1, 0.3));
- // Only check usage for LRU cache. HCC shows a 64KB usage for some reason
- if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
- ASSERT_EQ(GetCache()->GetUsage(), 0);
- }
+ ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.3));
+ EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
+ GetPercent(30 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
- ASSERT_EQ(sec_capacity, 0);
+ ASSERT_EQ(sec_capacity, (30 << 20));
  }

  TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdateWithReservation) {
@@ -1316,28 +1314,50 @@ TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdateWithReservation) {
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, 0);

+ ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.3));
+ EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
+ GetPercent(37 << 20, 1));
+ EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
+ GetPercent(3 << 20, 1));
+ ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+ ASSERT_EQ(sec_capacity, 30 << 20);
+
  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(0));
  }

- TEST_P(CompressedSecCacheTestWithTiered,
- DynamicUpdateWithReservationUnderflow) {
+ TEST_P(CompressedSecCacheTestWithTiered, ReservationOverCapacity) {
+ CompressedSecondaryCache* sec_cache =
+ reinterpret_cast<CompressedSecondaryCache*>(GetSecondaryCache());
  std::shared_ptr<Cache> tiered_cache = GetTieredCache();
- ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
- {{"CacheWithSecondaryAdapter::Release:ChargeSecCache1",
- "CacheWithSecondaryAdapter::UpdateCacheReservationRatio:Begin"},
- {"CacheWithSecondaryAdapter::UpdateCacheReservationRatio:End",
- "CacheWithSecondaryAdapter::Release:ChargeSecCache2"}});
- ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
-
- port::Thread reserve_release_thread([&]() {
- EXPECT_EQ(cache_res_mgr()->UpdateCacheReservation(50), Status::OK());
- EXPECT_EQ(cache_res_mgr()->UpdateCacheReservation(0), Status::OK());
- });
- ASSERT_OK(UpdateTieredCache(tiered_cache, 100 << 20, 0.01));
- reserve_release_thread.join();
- ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
-
- ASSERT_OK(UpdateTieredCache(tiered_cache, 100 << 20, 0.3));
+
+ ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(110 << 20));
+ // Use EXPECT_PRED3 instead of EXPECT_NEAR to void too many size_t to
+ // double explicit casts
+ EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (110 << 20),
+ GetPercent(110 << 20, 1));
+ EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (30 << 20),
+ GetPercent(30 << 20, 1));
+ size_t sec_capacity;
+ ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+ ASSERT_EQ(sec_capacity, (30 << 20));
+
+ ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.39));
+ EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (110 << 20),
+ GetPercent(110 << 20, 1));
+ EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (39 << 20),
+ GetPercent(39 << 20, 1));
+ ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+ ASSERT_EQ(sec_capacity, (39 << 20));
+
+ ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(90 << 20));
+ EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (94 << 20),
+ GetPercent(94 << 20, 1));
+ EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (35 << 20),
+ GetPercent(35 << 20, 1));
+ ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+ ASSERT_EQ(sec_capacity, (39 << 20));
+
+ ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(0));
  }

  INSTANTIATE_TEST_CASE_P(
@@ -83,7 +83,10 @@ CacheWithSecondaryAdapter::CacheWithSecondaryAdapter(
  : CacheWrapper(std::move(target)),
  secondary_cache_(std::move(secondary_cache)),
  adm_policy_(adm_policy),
- distribute_cache_res_(distribute_cache_res) {
+ distribute_cache_res_(distribute_cache_res),
+ placeholder_usage_(0),
+ reserved_usage_(0),
+ sec_reserved_(0) {
  target_->SetEvictionCallback(
  [this](const Slice& key, Handle* handle, bool was_hit) {
  return EvictionHandler(key, handle, was_hit);
@@ -103,8 +106,7 @@ CacheWithSecondaryAdapter::CacheWithSecondaryAdapter(
  // secondary cache is freed from the reservation.
  s = pri_cache_res_->UpdateCacheReservation(sec_capacity);
  assert(s.ok());
- sec_cache_res_ratio_.store((double)sec_capacity / target_->GetCapacity(),
- std::memory_order_relaxed);
+ sec_cache_res_ratio_ = (double)sec_capacity / target_->GetCapacity();
  }
  }

@@ -113,7 +115,7 @@ CacheWithSecondaryAdapter::~CacheWithSecondaryAdapter() {
  // use after free
  target_->SetEvictionCallback({});
  #ifndef NDEBUG
- if (distribute_cache_res_ && !ratio_changed_) {
+ if (distribute_cache_res_) {
  size_t sec_capacity = 0;
  Status s = secondary_cache_->GetCapacity(sec_capacity);
  assert(s.ok());
@@ -236,13 +238,31 @@ Status CacheWithSecondaryAdapter::Insert(const Slice& key, ObjectPtr value,
  const Slice& compressed_value,
  CompressionType type) {
  Status s = target_->Insert(key, value, helper, charge, handle, priority);
- if (s.ok() && value == nullptr && distribute_cache_res_) {
- size_t sec_charge = static_cast<size_t>(
- charge * (sec_cache_res_ratio_.load(std::memory_order_relaxed)));
- s = secondary_cache_->Deflate(sec_charge);
- assert(s.ok());
- s = pri_cache_res_->UpdateCacheReservation(sec_charge, /*increase=*/false);
- assert(s.ok());
+ if (s.ok() && value == nullptr && distribute_cache_res_ && handle) {
+ charge = target_->GetCharge(*handle);
+
+ MutexLock l(&cache_res_mutex_);
+ placeholder_usage_ += charge;
+ // Check if total placeholder reservation is more than the overall
+ // cache capacity. If it is, then we don't try to charge the
+ // secondary cache because we don't want to overcharge it (beyond
+ // its capacity).
+ // In order to make this a bit more lightweight, we also check if
+ // the difference between placeholder_usage_ and reserved_usage_ is
+ // atleast kReservationChunkSize and avoid any adjustments if not.
+ if ((placeholder_usage_ <= target_->GetCapacity()) &&
+ ((placeholder_usage_ - reserved_usage_) >= kReservationChunkSize)) {
+ reserved_usage_ = placeholder_usage_ & ~(kReservationChunkSize - 1);
+ size_t new_sec_reserved =
+ static_cast<size_t>(reserved_usage_ * sec_cache_res_ratio_);
+ size_t sec_charge = new_sec_reserved - sec_reserved_;
+ s = secondary_cache_->Deflate(sec_charge);
+ assert(s.ok());
+ s = pri_cache_res_->UpdateCacheReservation(sec_charge,
+ /*increase=*/false);
+ assert(s.ok());
+ sec_reserved_ += sec_charge;
+ }
  }
  // Warm up the secondary cache with the compressed block. The secondary
  // cache may choose to ignore it based on the admission policy.
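The rewritten branch batches the reservation bookkeeping: placeholder usage is tracked under `cache_res_mutex_`, rounded down to whole multiples of `kReservationChunkSize` (1 MiB), and only the delta of the secondary-cache share is forwarded to `Deflate()`. The following sketch (JavaScript, not RocksDB code; the names mirror the member variables and the numbers are made up) walks through that arithmetic:

```js
const CHUNK = 1 << 20 // mirrors kReservationChunkSize (1 MiB)
const ratio = 0.3     // mirrors sec_cache_res_ratio_

let placeholderUsage = 0 // mirrors placeholder_usage_
let reservedUsage = 0    // mirrors reserved_usage_
let secReserved = 0      // mirrors sec_reserved_

// Returns how much one placeholder insert of `charge` bytes would forward to
// the secondary cache's Deflate() for a cache of `capacity` bytes.
function placeholderInsert (charge, capacity) {
  placeholderUsage += charge
  if (placeholderUsage > capacity || placeholderUsage - reservedUsage < CHUNK) {
    return 0 // over capacity, or less than one chunk of drift: no adjustment
  }
  reservedUsage = placeholderUsage & ~(CHUNK - 1) // round down to whole chunks
  const newSecReserved = Math.floor(reservedUsage * ratio)
  const secCharge = newSecReserved - secReserved
  secReserved = newSecReserved
  return secCharge
}

placeholderInsert(600 << 10, 100 << 20) // 0: still below the first 1 MiB chunk
placeholderInsert(600 << 10, 100 << 20) // 314572: 30% of the first full chunk
```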
@@ -287,14 +307,27 @@ bool CacheWithSecondaryAdapter::Release(Handle* handle,
  ObjectPtr v = target_->Value(handle);
  if (v == nullptr && distribute_cache_res_) {
  size_t charge = target_->GetCharge(handle);
- size_t sec_charge = static_cast<size_t>(
- charge * (sec_cache_res_ratio_.load(std::memory_order_relaxed)));
- TEST_SYNC_POINT("CacheWithSecondaryAdapter::Release:ChargeSecCache1");
- TEST_SYNC_POINT("CacheWithSecondaryAdapter::Release:ChargeSecCache2");
- Status s = secondary_cache_->Inflate(sec_charge);
- assert(s.ok());
- s = pri_cache_res_->UpdateCacheReservation(sec_charge, /*increase=*/true);
- assert(s.ok());
+
+ MutexLock l(&cache_res_mutex_);
+ placeholder_usage_ -= charge;
+ // Check if total placeholder reservation is more than the overall
+ // cache capacity. If it is, then we do nothing as reserved_usage_ must
+ // be already maxed out
+ if ((placeholder_usage_ <= target_->GetCapacity()) &&
+ (placeholder_usage_ < reserved_usage_)) {
+ // Adjust reserved_usage_ in chunks of kReservationChunkSize, so
+ // we don't hit this slow path too often.
+ reserved_usage_ = placeholder_usage_ & ~(kReservationChunkSize - 1);
+ size_t new_sec_reserved =
+ static_cast<size_t>(reserved_usage_ * sec_cache_res_ratio_);
+ size_t sec_charge = sec_reserved_ - new_sec_reserved;
+ Status s = secondary_cache_->Inflate(sec_charge);
+ assert(s.ok());
+ s = pri_cache_res_->UpdateCacheReservation(sec_charge,
+ /*increase=*/true);
+ assert(s.ok());
+ sec_reserved_ -= sec_charge;
+ }
  }
  }
  return target_->Release(handle, erase_if_last_ref);
@@ -441,13 +474,11 @@ const char* CacheWithSecondaryAdapter::Name() const {
  // where the new capacity < total cache reservations.
  void CacheWithSecondaryAdapter::SetCapacity(size_t capacity) {
  size_t sec_capacity = static_cast<size_t>(
- capacity * (distribute_cache_res_
- ? sec_cache_res_ratio_.load(std::memory_order_relaxed)
- : 0.0));
+ capacity * (distribute_cache_res_ ? sec_cache_res_ratio_ : 0.0));
  size_t old_sec_capacity = 0;

  if (distribute_cache_res_) {
- MutexLock m(&mutex_);
+ MutexLock m(&cache_res_mutex_);

  Status s = secondary_cache_->GetCapacity(old_sec_capacity);
  if (!s.ok()) {
@@ -462,9 +493,17 @@ void CacheWithSecondaryAdapter::SetCapacity(size_t capacity) {
  // 3. Decrease the primary cache capacity to the total budget
  s = secondary_cache_->SetCapacity(sec_capacity);
  if (s.ok()) {
+ if (placeholder_usage_ > capacity) {
+ // Adjust reserved_usage_ down
+ reserved_usage_ = capacity & ~(kReservationChunkSize - 1);
+ }
+ size_t new_sec_reserved =
+ static_cast<size_t>(reserved_usage_ * sec_cache_res_ratio_);
  s = pri_cache_res_->UpdateCacheReservation(
- old_sec_capacity - sec_capacity,
+ (old_sec_capacity - sec_capacity) -
+ (sec_reserved_ - new_sec_reserved),
  /*increase=*/false);
+ sec_reserved_ = new_sec_reserved;
  assert(s.ok());
  target_->SetCapacity(capacity);
  }
@@ -498,7 +537,7 @@ Status CacheWithSecondaryAdapter::GetSecondaryCachePinnedUsage(
  size_t& size) const {
  Status s;
  if (distribute_cache_res_) {
- MutexLock m(&mutex_);
+ MutexLock m(&cache_res_mutex_);
  size_t capacity = 0;
  s = secondary_cache_->GetCapacity(capacity);
  if (s.ok()) {
@@ -526,12 +565,11 @@ Status CacheWithSecondaryAdapter::GetSecondaryCachePinnedUsage(
  // in the future.
  Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
  double compressed_secondary_ratio) {
- if (!distribute_cache_res_ ||
- sec_cache_res_ratio_.load(std::memory_order_relaxed) == 0.0) {
+ if (!distribute_cache_res_) {
  return Status::NotSupported();
  }

- MutexLock m(&mutex_);
+ MutexLock m(&cache_res_mutex_);
  size_t pri_capacity = target_->GetCapacity();
  size_t sec_capacity =
  static_cast<size_t>(pri_capacity * compressed_secondary_ratio);
@@ -541,38 +579,17 @@ Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
  return s;
  }

- TEST_SYNC_POINT(
- "CacheWithSecondaryAdapter::UpdateCacheReservationRatio:Begin");
-
- // There's a possible race condition here. Since the read of pri_cache_res_
- // memory used (secondary cache usage charged to primary cache), and the
- // change to sec_cache_res_ratio_ are not guarded by a mutex, its possible
- // that an Insert/Release in another thread might decrease/increase the
- // pri_cache_res_ reservation by the wrong amount. This should not be a
- // problem because updating the sec/pri ratio is a rare operation, and
- // the worst that can happen is we may over/under charge the secondary
- // cache usage by a little bit. But we do need to protect against
- // underflow of old_sec_reserved.
- // TODO: Make the accounting more accurate by tracking the total memory
- // reservation on the primary cache. This will also allow us to remove
- // the restriction of not being able to change the sec/pri ratio from
- // 0.0 to higher.
- size_t sec_charge_to_pri = pri_cache_res_->GetTotalMemoryUsed();
- size_t old_sec_reserved = (old_sec_capacity > sec_charge_to_pri)
- ? (old_sec_capacity - sec_charge_to_pri)
- : 0;
  // Calculate the new secondary cache reservation
- size_t sec_reserved = static_cast<size_t>(
- old_sec_reserved *
- (double)(compressed_secondary_ratio /
- sec_cache_res_ratio_.load(std::memory_order_relaxed)));
- sec_cache_res_ratio_.store(compressed_secondary_ratio,
- std::memory_order_relaxed);
+ // reserved_usage_ will never be > the cache capacity, so we don't
+ // have to worry about adjusting it here.
+ sec_cache_res_ratio_ = compressed_secondary_ratio;
+ size_t new_sec_reserved =
+ static_cast<size_t>(reserved_usage_ * sec_cache_res_ratio_);
  if (sec_capacity > old_sec_capacity) {
  // We're increasing the ratio, thus ending up with a larger secondary
  // cache and a smaller usable primary cache capacity. Similar to
  // SetCapacity(), we try to avoid a temporary increase in total usage
- // beyond teh configured capacity -
+ // beyond the configured capacity -
  // 1. A higher secondary cache ratio means it gets a higher share of
  // cache reservations. So first account for that by deflating the
  // secondary cache
@@ -580,12 +597,13 @@ Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
  // cache utilization (increase in capacity - increase in share of cache
  // reservation)
  // 3. Increase secondary cache capacity
- s = secondary_cache_->Deflate(sec_reserved - old_sec_reserved);
+ s = secondary_cache_->Deflate(new_sec_reserved - sec_reserved_);
  assert(s.ok());
  s = pri_cache_res_->UpdateCacheReservation(
- (sec_capacity - old_sec_capacity) - (sec_reserved - old_sec_reserved),
+ (sec_capacity - old_sec_capacity) - (new_sec_reserved - sec_reserved_),
  /*increase=*/true);
  assert(s.ok());
+ sec_reserved_ = new_sec_reserved;
  s = secondary_cache_->SetCapacity(sec_capacity);
  assert(s.ok());
  } else {
@@ -599,21 +617,16 @@ Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
  s = secondary_cache_->SetCapacity(sec_capacity);
  if (s.ok()) {
  s = pri_cache_res_->UpdateCacheReservation(
- (old_sec_capacity - sec_capacity) - (old_sec_reserved - sec_reserved),
+ (old_sec_capacity - sec_capacity) -
+ (sec_reserved_ - new_sec_reserved),
  /*increase=*/false);
  assert(s.ok());
- s = secondary_cache_->Inflate(old_sec_reserved - sec_reserved);
+ s = secondary_cache_->Inflate(sec_reserved_ - new_sec_reserved);
  assert(s.ok());
+ sec_reserved_ = new_sec_reserved;
  }
  }

- TEST_SYNC_POINT("CacheWithSecondaryAdapter::UpdateCacheReservationRatio:End");
- #ifndef NDEBUG
- // As mentioned in the function comments, we may accumulate some erros when
- // the ratio is changed. We set a flag here which disables some assertions
- // in the destructor
- ratio_changed_ = true;
- #endif
  return s;
  }

@@ -60,6 +60,8 @@ class CacheWithSecondaryAdapter : public CacheWrapper {
  SecondaryCache* TEST_GetSecondaryCache() { return secondary_cache_.get(); }

  private:
+ static constexpr size_t kReservationChunkSize = 1 << 20;
+
  bool EvictionHandler(const Slice& key, Handle* handle, bool was_hit);

  void StartAsyncLookupOnMySecondary(AsyncLookupHandle& async_handle);
@@ -84,11 +86,18 @@ class CacheWithSecondaryAdapter : public CacheWrapper {
  std::shared_ptr<ConcurrentCacheReservationManager> pri_cache_res_;
  // Fraction of a cache memory reservation to be assigned to the secondary
  // cache
- std::atomic<double> sec_cache_res_ratio_;
- mutable port::Mutex mutex_;
- #ifndef NDEBUG
- bool ratio_changed_ = false;
- #endif
+ double sec_cache_res_ratio_;
+ // Mutex for use when managing cache memory reservations. Should not be used
+ // for other purposes, as it may risk causing deadlocks.
+ mutable port::Mutex cache_res_mutex_;
+ // Total memory reserved by placeholder entriesin the cache
+ size_t placeholder_usage_;
+ // Total placeholoder memory charged to both the primary and secondary
+ // caches. Will be <= placeholder_usage_.
+ size_t reserved_usage_;
+ // Amount of memory reserved in the secondary cache. This should be
+ // reserved_usage_ * sec_cache_res_ratio_ in steady state.
+ size_t sec_reserved_;
  };

  } // namespace ROCKSDB_NAMESPACE
@@ -189,7 +189,7 @@ void CompressedCacheSetCapacityThread(void* v) {
  s.ToString().c_str());
  }
  } else if (FLAGS_compressed_secondary_cache_ratio > 0.0) {
- if (thread->rand.OneIn(2)) {
+ if (thread->rand.OneIn(2)) { // if (thread->rand.OneIn(2)) {
  size_t capacity = block_cache->GetCapacity();
  size_t adjustment;
  if (FLAGS_use_write_buffer_manager && FLAGS_db_write_buffer_size > 0) {
@@ -13,7 +13,7 @@
  // minor or major version number planned for release.
  #define ROCKSDB_MAJOR 8
  #define ROCKSDB_MINOR 8
- #define ROCKSDB_PATCH 0
+ #define ROCKSDB_PATCH 1

  // Do not use these. We made the mistake of declaring macros starting with
  // double underscore. Now we have to live with our choice. We'll deprecate these
package/index.js CHANGED
@@ -3,15 +3,14 @@
  const { fromCallback } = require('catering')
  const { AbstractLevel } = require('abstract-level')
  const ModuleError = require('module-error')
- const fs = require('fs')
  const binding = require('./binding')
  const { ChainedBatch } = require('./chained-batch')
  const { Iterator } = require('./iterator')
- const os = require('os')
+ const fs = require('node:fs')
+ const assert = require('node:assert')

  const kContext = Symbol('context')
  const kColumns = Symbol('columns')
- const kLocation = Symbol('location')
  const kPromise = Symbol('promise')
  const kRef = Symbol('ref')
  const kUnref = Symbol('unref')
@@ -21,34 +20,7 @@ const kPendingClose = Symbol('pendingClose')
  const EMPTY = {}

  class RocksLevel extends AbstractLevel {
- constructor (location, options, _) {
- // To help migrating to abstract-level
- if (typeof options === 'function' || typeof _ === 'function') {
- throw new ModuleError('The levelup-style callback argument has been removed', {
- code: 'LEVEL_LEGACY'
- })
- }
-
- if (typeof location !== 'string' || location === '') {
- throw new TypeError("The first argument 'location' must be a non-empty string")
- }
-
- options = {
- ...options, // TODO (fix): Other defaults...
- parallelism: options?.parallelism ?? Math.max(1, os.cpus().length / 2),
- createIfMissing: options?.createIfMissing ?? true,
- errorIfExists: options?.errorIfExists ?? false,
- walTTL: options?.walTTL ?? 0,
- walSizeLimit: options?.walSizeLimit ?? 0,
- walCompression: options?.walCompression ?? false,
- unorderedWrite: options?.unorderedWrite ?? false,
- manualWalFlush: options?.manualWalFlush ?? false,
- walTotalSizeLimit: options?.walTotalSizeLimit ?? 0,
- infoLogLevel: options?.infoLogLevel ?? ''
- }
-
- // TODO (fix): Check options.
-
+ constructor (locationOrHandle, options) {
  super({
  encodings: {
  buffer: true,
@@ -61,45 +33,64 @@ class RocksLevel extends AbstractLevel {
  }
  }, options)

- this[kLocation] = location
- this[kContext] = binding.db_init()
+ this[kContext] = binding.db_init(locationOrHandle)
  this[kColumns] = {}

  this[kRefs] = 0
  this[kPendingClose] = null
-
- // .updates(...) uses 'update' listener.
- this.setMaxListeners(100)
  }

  get sequence () {
  return binding.db_get_latest_sequence(this[kContext])
  }

- get location () {
- return this[kLocation]
- }
-
  get columns () {
  return this[kColumns]
  }

+ get handle () {
+ // TODO (fix): Support returning handle even if not open yet...
+ assert(this.status === 'open', 'Database is not open')
+
+ return binding.db_get_handle(this[kContext])
+ }
+
+ get location () {
+ return binding.db_get_location(this[kContext])
+ }
+
  _open (options, callback) {
- const onOpen = (err, columns) => {
- if (err) {
+ const doOpen = () => {
+ let columns
+ try {
+ columns = binding.db_open(this[kContext], options, (err, columns) => {
+ if (err) {
+ callback(err)
+ } else {
+ this[kColumns] = columns
+ callback(null)
+ }
+ })
+ } catch (err) {
  callback(err)
- } else {
+ }
+
+ if (columns) {
  this[kColumns] = columns
  callback(null)
  }
  }
+
  if (options.createIfMissing) {
- fs.mkdir(this[kLocation], { recursive: true }, (err) => {
- if (err) return callback(err)
- binding.db_open(this[kContext], this[kLocation], options, onOpen)
+ fs.mkdir(this.location, { recursive: true }, (err) => {
+ if (err) {
+ callback(err)
+ } else {
+ doOpen()
+ }
  })
  } else {
- binding.db_open(this[kContext], this[kLocation], options, onOpen)
+ doOpen()
  }
  }
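At the abstract-level layer this means the constructor takes either a location string or a handle borrowed from an already-open instance, and `location` is read back from the native side. A speculative usage sketch; the export name is assumed from the package entry point, the path is illustrative, and handle sharing presumes both instances live in the same process:

```js
// Assumes the package's main export exposes the RocksLevel class shown above.
const { RocksLevel } = require('@nxtedition/rocksdb')

async function main () {
  const db = new RocksLevel('/tmp/example-db')
  await db.open()

  // A second instance can attach to the same native Database via the BigInt
  // handle instead of a path; `db.handle` asserts that `db` is already open.
  const alias = new RocksLevel(db.handle)
  await alias.open() // receives the columns the first instance already opened

  console.log(alias.location) // '/tmp/example-db', read via db_get_location
}

main().catch(console.error)
```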
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@nxtedition/rocksdb",
- "version": "9.0.1",
+ "version": "10.0.0",
  "description": "A low-level Node.js RocksDB binding",
  "license": "MIT",
  "main": "index.js",
@@ -14,8 +14,8 @@
  "abstract-level": "^1.0.2",
  "catering": "^2.1.1",
  "module-error": "^1.0.2",
- "napi-macros": "~2.0.0",
- "node-gyp-build": "^4.5.0"
+ "napi-macros": "~2.2.2",
+ "node-gyp-build": "^4.8.0"
  },
  "devDependencies": {
  "@types/node": "^18.11.3",
@@ -33,7 +33,7 @@
  "readfiletree": "^1.0.0",
  "rimraf": "^3.0.0",
  "standard": "^17.0.0",
- "tape": "^5.6.1",
+ "tape": "^5.7.5",
  "tempy": "^1.0.1"
  },
  "standard": {
package/util.h CHANGED
@@ -45,6 +45,12 @@ static void Finalize(napi_env env, void* data, void* hint) {
  }
  }

+ static void FinalizeFree(napi_env env, void* data, void* hint) {
+ if (hint) {
+ free(hint);
+ }
+ }
+
  static napi_value CreateError(napi_env env, const std::optional<std::string_view>& code, const std::string_view& msg) {
  napi_value codeValue = nullptr;
  if (code) {
@@ -352,4 +358,4 @@ napi_status runAsync(const std::string& name, napi_env env, napi_value callback,
  worker.release();

  return napi_ok;
- }
+ }
Binary file