@nxtedition/rocksdb 9.0.1 → 10.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/binding.cc +226 -156
- package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache_test.cc +44 -24
- package/deps/rocksdb/rocksdb/cache/secondary_cache_adapter.cc +79 -66
- package/deps/rocksdb/rocksdb/cache/secondary_cache_adapter.h +14 -5
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.cc +1 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/version.h +1 -1
- package/index.js +38 -47
- package/package.json +4 -4
- package/prebuilds/{linux-x64/node.napi.node → darwin-arm64/@nxtedition+rocksdb.node} +0 -0
- package/util.h +7 -1
- package/prebuilds/darwin-arm64/node.napi.node +0 -0
package/binding.cc
CHANGED
|
@@ -40,8 +40,6 @@ struct Database;
|
|
|
40
40
|
struct Iterator;
|
|
41
41
|
|
|
42
42
|
struct ColumnFamily {
|
|
43
|
-
napi_ref ref;
|
|
44
|
-
napi_value val;
|
|
45
43
|
rocksdb::ColumnFamilyHandle* handle;
|
|
46
44
|
rocksdb::ColumnFamilyDescriptor descriptor;
|
|
47
45
|
};
|
|
@@ -52,6 +50,7 @@ struct Closable {
|
|
|
52
50
|
};
|
|
53
51
|
|
|
54
52
|
struct Database final {
|
|
53
|
+
Database(std::string location) : location(std::move(location)) {}
|
|
55
54
|
~Database() { assert(!db); }
|
|
56
55
|
|
|
57
56
|
rocksdb::Status Close() {
|
|
@@ -64,17 +63,35 @@ struct Database final {
|
|
|
64
63
|
}
|
|
65
64
|
closables.clear();
|
|
66
65
|
|
|
67
|
-
for (auto& [id, column] : columns) {
|
|
68
|
-
db->DestroyColumnFamilyHandle(column.handle);
|
|
69
|
-
}
|
|
70
|
-
columns.clear();
|
|
71
|
-
|
|
72
66
|
db->FlushWAL(true);
|
|
73
67
|
|
|
74
|
-
|
|
75
|
-
|
|
68
|
+
for (auto& [id, column] : columns) {
|
|
69
|
+
db->DestroyColumnFamilyHandle(column.handle);
|
|
70
|
+
}
|
|
71
|
+
columns.clear();
|
|
72
|
+
|
|
73
|
+
auto db2 = std::move(db);
|
|
74
|
+
return db2->Close();
|
|
76
75
|
}
|
|
77
76
|
|
|
77
|
+
void Ref() {
|
|
78
|
+
std::lock_guard<std::mutex> lock(mutex);
|
|
79
|
+
|
|
80
|
+
refs++;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
void Unref() {
|
|
84
|
+
std::lock_guard<std::mutex> lock(mutex);
|
|
85
|
+
|
|
86
|
+
if (--refs == 0) {
|
|
87
|
+
Close();
|
|
88
|
+
delete this;
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
std::mutex mutex;
|
|
93
|
+
int refs = 0;
|
|
94
|
+
std::string location;
|
|
78
95
|
std::unique_ptr<rocksdb::DB> db;
|
|
79
96
|
std::set<Closable*> closables;
|
|
80
97
|
std::map<int32_t, ColumnFamily> columns;
|
|
@@ -449,32 +466,67 @@ struct Iterator final : public BaseIterator {
|
|
|
449
466
|
napi_ref ref_ = nullptr;
|
|
450
467
|
};
|
|
451
468
|
|
|
452
|
-
static void env_cleanup_hook(void* arg) {
|
|
453
|
-
auto database = reinterpret_cast<Database*>(arg);
|
|
454
|
-
if (database) {
|
|
455
|
-
database->Close();
|
|
456
|
-
}
|
|
457
|
-
}
|
|
458
|
-
|
|
459
469
|
static void FinalizeDatabase(napi_env env, void* data, void* hint) {
|
|
460
470
|
if (data) {
|
|
461
|
-
|
|
462
|
-
database->Close();
|
|
463
|
-
napi_remove_env_cleanup_hook(env, env_cleanup_hook, database);
|
|
464
|
-
for (auto& [id, column] : database->columns) {
|
|
465
|
-
napi_delete_reference(env, column.ref);
|
|
466
|
-
}
|
|
467
|
-
delete database;
|
|
471
|
+
reinterpret_cast<Database*>(data)->Unref();
|
|
468
472
|
}
|
|
469
473
|
}
|
|
470
474
|
|
|
471
475
|
NAPI_METHOD(db_init) {
|
|
472
|
-
|
|
473
|
-
|
|
476
|
+
NAPI_ARGV(1);
|
|
477
|
+
|
|
478
|
+
Database* database = nullptr;
|
|
479
|
+
|
|
480
|
+
napi_valuetype type;
|
|
481
|
+
NAPI_STATUS_THROWS(napi_typeof(env, argv[0], &type));
|
|
482
|
+
|
|
483
|
+
if (type == napi_string) {
|
|
484
|
+
std::string location;
|
|
485
|
+
size_t length = 0;
|
|
486
|
+
NAPI_STATUS_THROWS(napi_get_value_string_utf8(env, argv[0], nullptr, 0, &length));
|
|
487
|
+
location.resize(length, '\0');
|
|
488
|
+
NAPI_STATUS_THROWS(napi_get_value_string_utf8(env, argv[0], &location[0], length + 1, &length));
|
|
489
|
+
|
|
490
|
+
database = new Database(location);
|
|
491
|
+
} else if (type == napi_bigint) {
|
|
492
|
+
int64_t value;
|
|
493
|
+
bool lossless;
|
|
494
|
+
NAPI_STATUS_THROWS(napi_get_value_bigint_int64(env, argv[0], &value, &lossless));
|
|
495
|
+
|
|
496
|
+
database = reinterpret_cast<Database*>(value);
|
|
497
|
+
} else {
|
|
498
|
+
NAPI_STATUS_THROWS(napi_invalid_arg);
|
|
499
|
+
}
|
|
474
500
|
|
|
475
501
|
napi_value result;
|
|
476
502
|
NAPI_STATUS_THROWS(napi_create_external(env, database, FinalizeDatabase, nullptr, &result));
|
|
477
503
|
|
|
504
|
+
database->Ref();
|
|
505
|
+
|
|
506
|
+
return result;
|
|
507
|
+
}
|
|
508
|
+
|
|
509
|
+
NAPI_METHOD(db_get_handle) {
|
|
510
|
+
NAPI_ARGV(1);
|
|
511
|
+
|
|
512
|
+
Database* database;
|
|
513
|
+
NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
|
|
514
|
+
|
|
515
|
+
napi_value result;
|
|
516
|
+
NAPI_STATUS_THROWS(napi_create_bigint_int64(env, reinterpret_cast<intptr_t>(database), &result));
|
|
517
|
+
|
|
518
|
+
return result;
|
|
519
|
+
}
|
|
520
|
+
|
|
521
|
+
NAPI_METHOD(db_get_location) {
|
|
522
|
+
NAPI_ARGV(1);
|
|
523
|
+
|
|
524
|
+
Database* database;
|
|
525
|
+
NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
|
|
526
|
+
|
|
527
|
+
napi_value result;
|
|
528
|
+
NAPI_STATUS_THROWS(Convert(env, &database->location, Encoding::String, result));
|
|
529
|
+
|
|
478
530
|
return result;
|
|
479
531
|
}
|
|
480
532
|
|
|
@@ -710,141 +762,157 @@ NAPI_METHOD(db_get_identity) {
|
|
|
710
762
|
}
|
|
711
763
|
|
|
712
764
|
NAPI_METHOD(db_open) {
|
|
713
|
-
NAPI_ARGV(
|
|
765
|
+
NAPI_ARGV(3);
|
|
714
766
|
|
|
715
767
|
Database* database;
|
|
716
768
|
NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
|
|
717
769
|
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
770
|
+
if (database->db) {
|
|
771
|
+
napi_value columns;
|
|
772
|
+
NAPI_STATUS_THROWS(napi_create_object(env, &columns));
|
|
773
|
+
for (auto& [id, column] : database->columns) {
|
|
774
|
+
napi_value val;
|
|
775
|
+
NAPI_STATUS_THROWS(napi_create_external(env, column.handle, nullptr, nullptr, &val));
|
|
776
|
+
NAPI_STATUS_THROWS(napi_set_named_property(env, columns, column.descriptor.name.c_str(), val));
|
|
777
|
+
}
|
|
778
|
+
return columns;
|
|
779
|
+
} else {
|
|
780
|
+
rocksdb::Options dbOptions;
|
|
781
|
+
|
|
782
|
+
const auto options = argv[1];
|
|
783
|
+
|
|
784
|
+
int parallelism = std::max<int>(1, std::thread::hardware_concurrency() / 2);
|
|
785
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "parallelism", parallelism));
|
|
786
|
+
dbOptions.IncreaseParallelism(parallelism);
|
|
787
|
+
|
|
788
|
+
uint32_t walTTL = 0;
|
|
789
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "walTTL", walTTL));
|
|
790
|
+
dbOptions.WAL_ttl_seconds = walTTL / 1e3;
|
|
791
|
+
|
|
792
|
+
uint32_t walSizeLimit = 0;
|
|
793
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "walSizeLimit", walSizeLimit));
|
|
794
|
+
dbOptions.WAL_size_limit_MB = walSizeLimit / 1e6;
|
|
795
|
+
|
|
796
|
+
uint32_t maxTotalWalSize = 0;
|
|
797
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "walTotalSizeLimit", walSizeLimit));
|
|
798
|
+
dbOptions.max_total_wal_size = maxTotalWalSize / 1e6;
|
|
799
|
+
|
|
800
|
+
bool walCompression = false;
|
|
801
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "walCompression", walCompression));
|
|
802
|
+
dbOptions.wal_compression =
|
|
803
|
+
walCompression ? rocksdb::CompressionType::kZSTD : rocksdb::CompressionType::kNoCompression;
|
|
804
|
+
|
|
805
|
+
dbOptions.avoid_unnecessary_blocking_io = true;
|
|
806
|
+
dbOptions.write_dbid_to_manifest = true;
|
|
807
|
+
dbOptions.enable_pipelined_write = true; // We only write in the main thread...
|
|
808
|
+
dbOptions.create_missing_column_families = true;
|
|
809
|
+
dbOptions.fail_if_options_file_error = true;
|
|
810
|
+
|
|
811
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "createIfMissing", dbOptions.create_if_missing));
|
|
812
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "errorIfExists", dbOptions.error_if_exists));
|
|
813
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "pipelinedWrite", dbOptions.enable_pipelined_write));
|
|
814
|
+
|
|
815
|
+
// TODO (feat): dbOptions.listeners
|
|
816
|
+
|
|
817
|
+
std::string infoLogLevel;
|
|
818
|
+
NAPI_STATUS_THROWS(GetProperty(env, options, "infoLogLevel", infoLogLevel));
|
|
819
|
+
if (infoLogLevel.size() > 0) {
|
|
820
|
+
rocksdb::InfoLogLevel lvl = {};
|
|
821
|
+
|
|
822
|
+
if (infoLogLevel == "debug")
|
|
823
|
+
lvl = rocksdb::InfoLogLevel::DEBUG_LEVEL;
|
|
824
|
+
else if (infoLogLevel == "info")
|
|
825
|
+
lvl = rocksdb::InfoLogLevel::INFO_LEVEL;
|
|
826
|
+
else if (infoLogLevel == "warn")
|
|
827
|
+
lvl = rocksdb::InfoLogLevel::WARN_LEVEL;
|
|
828
|
+
else if (infoLogLevel == "error")
|
|
829
|
+
lvl = rocksdb::InfoLogLevel::ERROR_LEVEL;
|
|
830
|
+
else if (infoLogLevel == "fatal")
|
|
831
|
+
lvl = rocksdb::InfoLogLevel::FATAL_LEVEL;
|
|
832
|
+
else if (infoLogLevel == "header")
|
|
833
|
+
lvl = rocksdb::InfoLogLevel::HEADER_LEVEL;
|
|
834
|
+
else
|
|
835
|
+
napi_throw_error(env, nullptr, "invalid log level");
|
|
836
|
+
|
|
837
|
+
dbOptions.info_log_level = lvl;
|
|
838
|
+
} else {
|
|
839
|
+
// In some places RocksDB checks this option to see if it should prepare
|
|
840
|
+
// debug information (ahead of logging), so set it to the highest level.
|
|
841
|
+
dbOptions.info_log_level = rocksdb::InfoLogLevel::HEADER_LEVEL;
|
|
842
|
+
dbOptions.info_log.reset(new NullLogger());
|
|
843
|
+
}
|
|
844
|
+
|
|
845
|
+
NAPI_STATUS_THROWS(InitOptions(env, dbOptions, options));
|
|
846
|
+
|
|
847
|
+
std::vector<rocksdb::ColumnFamilyDescriptor> descriptors;
|
|
848
|
+
|
|
849
|
+
bool hasColumns;
|
|
850
|
+
NAPI_STATUS_THROWS(napi_has_named_property(env, options, "columns", &hasColumns));
|
|
851
|
+
|
|
852
|
+
if (hasColumns) {
|
|
853
|
+
napi_value columns;
|
|
854
|
+
NAPI_STATUS_THROWS(napi_get_named_property(env, options, "columns", &columns));
|
|
855
|
+
|
|
856
|
+
napi_value keys;
|
|
857
|
+
NAPI_STATUS_THROWS(napi_get_property_names(env, columns, &keys));
|
|
858
|
+
|
|
859
|
+
uint32_t len;
|
|
860
|
+
NAPI_STATUS_THROWS(napi_get_array_length(env, keys, &len));
|
|
861
|
+
|
|
862
|
+
descriptors.resize(len);
|
|
863
|
+
for (uint32_t n = 0; n < len; ++n) {
|
|
864
|
+
napi_value key;
|
|
865
|
+
NAPI_STATUS_THROWS(napi_get_element(env, keys, n, &key));
|
|
866
|
+
|
|
867
|
+
napi_value column;
|
|
868
|
+
NAPI_STATUS_THROWS(napi_get_property(env, columns, key, &column));
|
|
869
|
+
|
|
870
|
+
NAPI_STATUS_THROWS(InitOptions(env, descriptors[n].options, column));
|
|
871
|
+
|
|
872
|
+
NAPI_STATUS_THROWS(GetValue(env, key, descriptors[n].name));
|
|
873
|
+
}
|
|
874
|
+
}
|
|
875
|
+
|
|
876
|
+
auto callback = argv[2];
|
|
877
|
+
|
|
878
|
+
runAsync<std::vector<rocksdb::ColumnFamilyHandle*>>(
|
|
879
|
+
"leveldown.open", env, callback,
|
|
880
|
+
[=](auto& handles) {
|
|
881
|
+
assert(!database->db);
|
|
882
|
+
|
|
883
|
+
rocksdb::DB* db = nullptr;
|
|
884
|
+
|
|
885
|
+
const auto status = descriptors.empty()
|
|
886
|
+
? rocksdb::DB::Open(dbOptions, database->location, &db)
|
|
887
|
+
: rocksdb::DB::Open(dbOptions, database->location, descriptors, &handles, &db);
|
|
888
|
+
|
|
889
|
+
database->db.reset(db);
|
|
890
|
+
|
|
891
|
+
return status;
|
|
892
|
+
},
|
|
893
|
+
[=](auto& handles, auto env, auto& argv) {
|
|
894
|
+
argv.resize(2);
|
|
895
|
+
|
|
896
|
+
NAPI_STATUS_RETURN(napi_create_object(env, &argv[1]));
|
|
897
|
+
|
|
898
|
+
for (size_t n = 0; n < handles.size(); ++n) {
|
|
899
|
+
ColumnFamily column;
|
|
900
|
+
column.handle = handles[n];
|
|
901
|
+
column.descriptor = descriptors[n];
|
|
902
|
+
database->columns[column.handle->GetID()] = column;
|
|
903
|
+
}
|
|
904
|
+
|
|
905
|
+
napi_value columns;
|
|
906
|
+
NAPI_STATUS_RETURN(napi_create_object(env, &columns));
|
|
907
|
+
for (auto& [id, column] : database->columns) {
|
|
908
|
+
napi_value val;
|
|
909
|
+
NAPI_STATUS_RETURN(napi_create_external(env, column.handle, nullptr, nullptr, &val));
|
|
910
|
+
NAPI_STATUS_RETURN(napi_set_named_property(env, columns, column.descriptor.name.c_str(), val));
|
|
911
|
+
}
|
|
912
|
+
|
|
913
|
+
return napi_ok;
|
|
914
|
+
});
|
|
915
|
+
}
|
|
848
916
|
|
|
849
917
|
return 0;
|
|
850
918
|
}
|
|
@@ -1567,6 +1635,8 @@ NAPI_INIT() {
|
|
|
1567
1635
|
NAPI_EXPORT_FUNCTION(db_init);
|
|
1568
1636
|
NAPI_EXPORT_FUNCTION(db_open);
|
|
1569
1637
|
NAPI_EXPORT_FUNCTION(db_get_identity);
|
|
1638
|
+
NAPI_EXPORT_FUNCTION(db_get_handle);
|
|
1639
|
+
NAPI_EXPORT_FUNCTION(db_get_location);
|
|
1570
1640
|
NAPI_EXPORT_FUNCTION(db_close);
|
|
1571
1641
|
NAPI_EXPORT_FUNCTION(db_get_many);
|
|
1572
1642
|
NAPI_EXPORT_FUNCTION(db_clear);
|
|
@@ -1224,13 +1224,11 @@ TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdate) {
|
|
|
1224
1224
|
ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
|
|
1225
1225
|
ASSERT_EQ(sec_capacity, 0);
|
|
1226
1226
|
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
ASSERT_EQ(GetCache()->GetUsage(), 0);
|
|
1231
|
-
}
|
|
1227
|
+
ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.3));
|
|
1228
|
+
EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
|
|
1229
|
+
GetPercent(30 << 20, 1));
|
|
1232
1230
|
ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
|
|
1233
|
-
ASSERT_EQ(sec_capacity,
|
|
1231
|
+
ASSERT_EQ(sec_capacity, (30 << 20));
|
|
1234
1232
|
}
|
|
1235
1233
|
|
|
1236
1234
|
TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdateWithReservation) {
|
|
@@ -1316,28 +1314,50 @@ TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdateWithReservation) {
|
|
|
1316
1314
|
ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
|
|
1317
1315
|
ASSERT_EQ(sec_capacity, 0);
|
|
1318
1316
|
|
|
1317
|
+
ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.3));
|
|
1318
|
+
EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
|
|
1319
|
+
GetPercent(37 << 20, 1));
|
|
1320
|
+
EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
|
|
1321
|
+
GetPercent(3 << 20, 1));
|
|
1322
|
+
ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
|
|
1323
|
+
ASSERT_EQ(sec_capacity, 30 << 20);
|
|
1324
|
+
|
|
1319
1325
|
ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(0));
|
|
1320
1326
|
}
|
|
1321
1327
|
|
|
1322
|
-
TEST_P(CompressedSecCacheTestWithTiered,
|
|
1323
|
-
|
|
1328
|
+
TEST_P(CompressedSecCacheTestWithTiered, ReservationOverCapacity) {
|
|
1329
|
+
CompressedSecondaryCache* sec_cache =
|
|
1330
|
+
reinterpret_cast<CompressedSecondaryCache*>(GetSecondaryCache());
|
|
1324
1331
|
std::shared_ptr<Cache> tiered_cache = GetTieredCache();
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
|
|
1328
|
-
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1332
|
+
|
|
1333
|
+
ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(110 << 20));
|
|
1334
|
+
// Use EXPECT_PRED3 instead of EXPECT_NEAR to avoid too many size_t to
|
|
1335
|
+
// double explicit casts
|
|
1336
|
+
EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (110 << 20),
|
|
1337
|
+
GetPercent(110 << 20, 1));
|
|
1338
|
+
EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (30 << 20),
|
|
1339
|
+
GetPercent(30 << 20, 1));
|
|
1340
|
+
size_t sec_capacity;
|
|
1341
|
+
ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
|
|
1342
|
+
ASSERT_EQ(sec_capacity, (30 << 20));
|
|
1343
|
+
|
|
1344
|
+
ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.39));
|
|
1345
|
+
EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (110 << 20),
|
|
1346
|
+
GetPercent(110 << 20, 1));
|
|
1347
|
+
EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (39 << 20),
|
|
1348
|
+
GetPercent(39 << 20, 1));
|
|
1349
|
+
ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
|
|
1350
|
+
ASSERT_EQ(sec_capacity, (39 << 20));
|
|
1351
|
+
|
|
1352
|
+
ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(90 << 20));
|
|
1353
|
+
EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (94 << 20),
|
|
1354
|
+
GetPercent(94 << 20, 1));
|
|
1355
|
+
EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (35 << 20),
|
|
1356
|
+
GetPercent(35 << 20, 1));
|
|
1357
|
+
ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
|
|
1358
|
+
ASSERT_EQ(sec_capacity, (39 << 20));
|
|
1359
|
+
|
|
1360
|
+
ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(0));
|
|
1341
1361
|
}
|
|
1342
1362
|
|
|
1343
1363
|
INSTANTIATE_TEST_CASE_P(
|
|
@@ -83,7 +83,10 @@ CacheWithSecondaryAdapter::CacheWithSecondaryAdapter(
|
|
|
83
83
|
: CacheWrapper(std::move(target)),
|
|
84
84
|
secondary_cache_(std::move(secondary_cache)),
|
|
85
85
|
adm_policy_(adm_policy),
|
|
86
|
-
distribute_cache_res_(distribute_cache_res)
|
|
86
|
+
distribute_cache_res_(distribute_cache_res),
|
|
87
|
+
placeholder_usage_(0),
|
|
88
|
+
reserved_usage_(0),
|
|
89
|
+
sec_reserved_(0) {
|
|
87
90
|
target_->SetEvictionCallback(
|
|
88
91
|
[this](const Slice& key, Handle* handle, bool was_hit) {
|
|
89
92
|
return EvictionHandler(key, handle, was_hit);
|
|
@@ -103,8 +106,7 @@ CacheWithSecondaryAdapter::CacheWithSecondaryAdapter(
|
|
|
103
106
|
// secondary cache is freed from the reservation.
|
|
104
107
|
s = pri_cache_res_->UpdateCacheReservation(sec_capacity);
|
|
105
108
|
assert(s.ok());
|
|
106
|
-
sec_cache_res_ratio_
|
|
107
|
-
std::memory_order_relaxed);
|
|
109
|
+
sec_cache_res_ratio_ = (double)sec_capacity / target_->GetCapacity();
|
|
108
110
|
}
|
|
109
111
|
}
|
|
110
112
|
|
|
@@ -113,7 +115,7 @@ CacheWithSecondaryAdapter::~CacheWithSecondaryAdapter() {
|
|
|
113
115
|
// use after free
|
|
114
116
|
target_->SetEvictionCallback({});
|
|
115
117
|
#ifndef NDEBUG
|
|
116
|
-
if (distribute_cache_res_
|
|
118
|
+
if (distribute_cache_res_) {
|
|
117
119
|
size_t sec_capacity = 0;
|
|
118
120
|
Status s = secondary_cache_->GetCapacity(sec_capacity);
|
|
119
121
|
assert(s.ok());
|
|
@@ -236,13 +238,31 @@ Status CacheWithSecondaryAdapter::Insert(const Slice& key, ObjectPtr value,
|
|
|
236
238
|
const Slice& compressed_value,
|
|
237
239
|
CompressionType type) {
|
|
238
240
|
Status s = target_->Insert(key, value, helper, charge, handle, priority);
|
|
239
|
-
if (s.ok() && value == nullptr && distribute_cache_res_) {
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
241
|
+
if (s.ok() && value == nullptr && distribute_cache_res_ && handle) {
|
|
242
|
+
charge = target_->GetCharge(*handle);
|
|
243
|
+
|
|
244
|
+
MutexLock l(&cache_res_mutex_);
|
|
245
|
+
placeholder_usage_ += charge;
|
|
246
|
+
// Check if total placeholder reservation is more than the overall
|
|
247
|
+
// cache capacity. If it is, then we don't try to charge the
|
|
248
|
+
// secondary cache because we don't want to overcharge it (beyond
|
|
249
|
+
// its capacity).
|
|
250
|
+
// In order to make this a bit more lightweight, we also check if
|
|
251
|
+
// the difference between placeholder_usage_ and reserved_usage_ is
|
|
252
|
+
// atleast kReservationChunkSize and avoid any adjustments if not.
|
|
253
|
+
if ((placeholder_usage_ <= target_->GetCapacity()) &&
|
|
254
|
+
((placeholder_usage_ - reserved_usage_) >= kReservationChunkSize)) {
|
|
255
|
+
reserved_usage_ = placeholder_usage_ & ~(kReservationChunkSize - 1);
|
|
256
|
+
size_t new_sec_reserved =
|
|
257
|
+
static_cast<size_t>(reserved_usage_ * sec_cache_res_ratio_);
|
|
258
|
+
size_t sec_charge = new_sec_reserved - sec_reserved_;
|
|
259
|
+
s = secondary_cache_->Deflate(sec_charge);
|
|
260
|
+
assert(s.ok());
|
|
261
|
+
s = pri_cache_res_->UpdateCacheReservation(sec_charge,
|
|
262
|
+
/*increase=*/false);
|
|
263
|
+
assert(s.ok());
|
|
264
|
+
sec_reserved_ += sec_charge;
|
|
265
|
+
}
|
|
246
266
|
}
|
|
247
267
|
// Warm up the secondary cache with the compressed block. The secondary
|
|
248
268
|
// cache may choose to ignore it based on the admission policy.
|
|
@@ -287,14 +307,27 @@ bool CacheWithSecondaryAdapter::Release(Handle* handle,
|
|
|
287
307
|
ObjectPtr v = target_->Value(handle);
|
|
288
308
|
if (v == nullptr && distribute_cache_res_) {
|
|
289
309
|
size_t charge = target_->GetCharge(handle);
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
310
|
+
|
|
311
|
+
MutexLock l(&cache_res_mutex_);
|
|
312
|
+
placeholder_usage_ -= charge;
|
|
313
|
+
// Check if total placeholder reservation is more than the overall
|
|
314
|
+
// cache capacity. If it is, then we do nothing as reserved_usage_ must
|
|
315
|
+
// be already maxed out
|
|
316
|
+
if ((placeholder_usage_ <= target_->GetCapacity()) &&
|
|
317
|
+
(placeholder_usage_ < reserved_usage_)) {
|
|
318
|
+
// Adjust reserved_usage_ in chunks of kReservationChunkSize, so
|
|
319
|
+
// we don't hit this slow path too often.
|
|
320
|
+
reserved_usage_ = placeholder_usage_ & ~(kReservationChunkSize - 1);
|
|
321
|
+
size_t new_sec_reserved =
|
|
322
|
+
static_cast<size_t>(reserved_usage_ * sec_cache_res_ratio_);
|
|
323
|
+
size_t sec_charge = sec_reserved_ - new_sec_reserved;
|
|
324
|
+
Status s = secondary_cache_->Inflate(sec_charge);
|
|
325
|
+
assert(s.ok());
|
|
326
|
+
s = pri_cache_res_->UpdateCacheReservation(sec_charge,
|
|
327
|
+
/*increase=*/true);
|
|
328
|
+
assert(s.ok());
|
|
329
|
+
sec_reserved_ -= sec_charge;
|
|
330
|
+
}
|
|
298
331
|
}
|
|
299
332
|
}
|
|
300
333
|
return target_->Release(handle, erase_if_last_ref);
|
|
@@ -441,13 +474,11 @@ const char* CacheWithSecondaryAdapter::Name() const {
|
|
|
441
474
|
// where the new capacity < total cache reservations.
|
|
442
475
|
void CacheWithSecondaryAdapter::SetCapacity(size_t capacity) {
|
|
443
476
|
size_t sec_capacity = static_cast<size_t>(
|
|
444
|
-
capacity * (distribute_cache_res_
|
|
445
|
-
? sec_cache_res_ratio_.load(std::memory_order_relaxed)
|
|
446
|
-
: 0.0));
|
|
477
|
+
capacity * (distribute_cache_res_ ? sec_cache_res_ratio_ : 0.0));
|
|
447
478
|
size_t old_sec_capacity = 0;
|
|
448
479
|
|
|
449
480
|
if (distribute_cache_res_) {
|
|
450
|
-
MutexLock m(&
|
|
481
|
+
MutexLock m(&cache_res_mutex_);
|
|
451
482
|
|
|
452
483
|
Status s = secondary_cache_->GetCapacity(old_sec_capacity);
|
|
453
484
|
if (!s.ok()) {
|
|
@@ -462,9 +493,17 @@ void CacheWithSecondaryAdapter::SetCapacity(size_t capacity) {
|
|
|
462
493
|
// 3. Decrease the primary cache capacity to the total budget
|
|
463
494
|
s = secondary_cache_->SetCapacity(sec_capacity);
|
|
464
495
|
if (s.ok()) {
|
|
496
|
+
if (placeholder_usage_ > capacity) {
|
|
497
|
+
// Adjust reserved_usage_ down
|
|
498
|
+
reserved_usage_ = capacity & ~(kReservationChunkSize - 1);
|
|
499
|
+
}
|
|
500
|
+
size_t new_sec_reserved =
|
|
501
|
+
static_cast<size_t>(reserved_usage_ * sec_cache_res_ratio_);
|
|
465
502
|
s = pri_cache_res_->UpdateCacheReservation(
|
|
466
|
-
old_sec_capacity - sec_capacity
|
|
503
|
+
(old_sec_capacity - sec_capacity) -
|
|
504
|
+
(sec_reserved_ - new_sec_reserved),
|
|
467
505
|
/*increase=*/false);
|
|
506
|
+
sec_reserved_ = new_sec_reserved;
|
|
468
507
|
assert(s.ok());
|
|
469
508
|
target_->SetCapacity(capacity);
|
|
470
509
|
}
|
|
@@ -498,7 +537,7 @@ Status CacheWithSecondaryAdapter::GetSecondaryCachePinnedUsage(
|
|
|
498
537
|
size_t& size) const {
|
|
499
538
|
Status s;
|
|
500
539
|
if (distribute_cache_res_) {
|
|
501
|
-
MutexLock m(&
|
|
540
|
+
MutexLock m(&cache_res_mutex_);
|
|
502
541
|
size_t capacity = 0;
|
|
503
542
|
s = secondary_cache_->GetCapacity(capacity);
|
|
504
543
|
if (s.ok()) {
|
|
@@ -526,12 +565,11 @@ Status CacheWithSecondaryAdapter::GetSecondaryCachePinnedUsage(
|
|
|
526
565
|
// in the future.
|
|
527
566
|
Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
|
|
528
567
|
double compressed_secondary_ratio) {
|
|
529
|
-
if (!distribute_cache_res_
|
|
530
|
-
sec_cache_res_ratio_.load(std::memory_order_relaxed) == 0.0) {
|
|
568
|
+
if (!distribute_cache_res_) {
|
|
531
569
|
return Status::NotSupported();
|
|
532
570
|
}
|
|
533
571
|
|
|
534
|
-
MutexLock m(&
|
|
572
|
+
MutexLock m(&cache_res_mutex_);
|
|
535
573
|
size_t pri_capacity = target_->GetCapacity();
|
|
536
574
|
size_t sec_capacity =
|
|
537
575
|
static_cast<size_t>(pri_capacity * compressed_secondary_ratio);
|
|
@@ -541,38 +579,17 @@ Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
|
|
|
541
579
|
return s;
|
|
542
580
|
}
|
|
543
581
|
|
|
544
|
-
TEST_SYNC_POINT(
|
|
545
|
-
"CacheWithSecondaryAdapter::UpdateCacheReservationRatio:Begin");
|
|
546
|
-
|
|
547
|
-
// There's a possible race condition here. Since the read of pri_cache_res_
|
|
548
|
-
// memory used (secondary cache usage charged to primary cache), and the
|
|
549
|
-
// change to sec_cache_res_ratio_ are not guarded by a mutex, its possible
|
|
550
|
-
// that an Insert/Release in another thread might decrease/increase the
|
|
551
|
-
// pri_cache_res_ reservation by the wrong amount. This should not be a
|
|
552
|
-
// problem because updating the sec/pri ratio is a rare operation, and
|
|
553
|
-
// the worst that can happen is we may over/under charge the secondary
|
|
554
|
-
// cache usage by a little bit. But we do need to protect against
|
|
555
|
-
// underflow of old_sec_reserved.
|
|
556
|
-
// TODO: Make the accounting more accurate by tracking the total memory
|
|
557
|
-
// reservation on the primary cache. This will also allow us to remove
|
|
558
|
-
// the restriction of not being able to change the sec/pri ratio from
|
|
559
|
-
// 0.0 to higher.
|
|
560
|
-
size_t sec_charge_to_pri = pri_cache_res_->GetTotalMemoryUsed();
|
|
561
|
-
size_t old_sec_reserved = (old_sec_capacity > sec_charge_to_pri)
|
|
562
|
-
? (old_sec_capacity - sec_charge_to_pri)
|
|
563
|
-
: 0;
|
|
564
582
|
// Calculate the new secondary cache reservation
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
std::memory_order_relaxed);
|
|
583
|
+
// reserved_usage_ will never be > the cache capacity, so we don't
|
|
584
|
+
// have to worry about adjusting it here.
|
|
585
|
+
sec_cache_res_ratio_ = compressed_secondary_ratio;
|
|
586
|
+
size_t new_sec_reserved =
|
|
587
|
+
static_cast<size_t>(reserved_usage_ * sec_cache_res_ratio_);
|
|
571
588
|
if (sec_capacity > old_sec_capacity) {
|
|
572
589
|
// We're increasing the ratio, thus ending up with a larger secondary
|
|
573
590
|
// cache and a smaller usable primary cache capacity. Similar to
|
|
574
591
|
// SetCapacity(), we try to avoid a temporary increase in total usage
|
|
575
|
-
// beyond
|
|
592
|
+
// beyond the configured capacity -
|
|
576
593
|
// 1. A higher secondary cache ratio means it gets a higher share of
|
|
577
594
|
// cache reservations. So first account for that by deflating the
|
|
578
595
|
// secondary cache
|
|
@@ -580,12 +597,13 @@ Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
|
|
|
580
597
|
// cache utilization (increase in capacity - increase in share of cache
|
|
581
598
|
// reservation)
|
|
582
599
|
// 3. Increase secondary cache capacity
|
|
583
|
-
s = secondary_cache_->Deflate(
|
|
600
|
+
s = secondary_cache_->Deflate(new_sec_reserved - sec_reserved_);
|
|
584
601
|
assert(s.ok());
|
|
585
602
|
s = pri_cache_res_->UpdateCacheReservation(
|
|
586
|
-
(sec_capacity - old_sec_capacity) - (
|
|
603
|
+
(sec_capacity - old_sec_capacity) - (new_sec_reserved - sec_reserved_),
|
|
587
604
|
/*increase=*/true);
|
|
588
605
|
assert(s.ok());
|
|
606
|
+
sec_reserved_ = new_sec_reserved;
|
|
589
607
|
s = secondary_cache_->SetCapacity(sec_capacity);
|
|
590
608
|
assert(s.ok());
|
|
591
609
|
} else {
|
|
@@ -599,21 +617,16 @@ Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
|
|
|
599
617
|
s = secondary_cache_->SetCapacity(sec_capacity);
|
|
600
618
|
if (s.ok()) {
|
|
601
619
|
s = pri_cache_res_->UpdateCacheReservation(
|
|
602
|
-
(old_sec_capacity - sec_capacity) -
|
|
620
|
+
(old_sec_capacity - sec_capacity) -
|
|
621
|
+
(sec_reserved_ - new_sec_reserved),
|
|
603
622
|
/*increase=*/false);
|
|
604
623
|
assert(s.ok());
|
|
605
|
-
s = secondary_cache_->Inflate(
|
|
624
|
+
s = secondary_cache_->Inflate(sec_reserved_ - new_sec_reserved);
|
|
606
625
|
assert(s.ok());
|
|
626
|
+
sec_reserved_ = new_sec_reserved;
|
|
607
627
|
}
|
|
608
628
|
}
|
|
609
629
|
|
|
610
|
-
TEST_SYNC_POINT("CacheWithSecondaryAdapter::UpdateCacheReservationRatio:End");
|
|
611
|
-
#ifndef NDEBUG
|
|
612
|
-
// As mentioned in the function comments, we may accumulate some erros when
|
|
613
|
-
// the ratio is changed. We set a flag here which disables some assertions
|
|
614
|
-
// in the destructor
|
|
615
|
-
ratio_changed_ = true;
|
|
616
|
-
#endif
|
|
617
630
|
return s;
|
|
618
631
|
}
|
|
619
632
|
|
|
@@ -60,6 +60,8 @@ class CacheWithSecondaryAdapter : public CacheWrapper {
|
|
|
60
60
|
SecondaryCache* TEST_GetSecondaryCache() { return secondary_cache_.get(); }
|
|
61
61
|
|
|
62
62
|
private:
|
|
63
|
+
static constexpr size_t kReservationChunkSize = 1 << 20;
|
|
64
|
+
|
|
63
65
|
bool EvictionHandler(const Slice& key, Handle* handle, bool was_hit);
|
|
64
66
|
|
|
65
67
|
void StartAsyncLookupOnMySecondary(AsyncLookupHandle& async_handle);
|
|
@@ -84,11 +86,18 @@ class CacheWithSecondaryAdapter : public CacheWrapper {
|
|
|
84
86
|
std::shared_ptr<ConcurrentCacheReservationManager> pri_cache_res_;
|
|
85
87
|
// Fraction of a cache memory reservation to be assigned to the secondary
|
|
86
88
|
// cache
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
89
|
+
double sec_cache_res_ratio_;
|
|
90
|
+
// Mutex for use when managing cache memory reservations. Should not be used
|
|
91
|
+
// for other purposes, as it may risk causing deadlocks.
|
|
92
|
+
mutable port::Mutex cache_res_mutex_;
|
|
93
|
+
// Total memory reserved by placeholder entries in the cache
|
|
94
|
+
size_t placeholder_usage_;
|
|
95
|
+
// Total placeholder memory charged to both the primary and secondary
|
|
96
|
+
// caches. Will be <= placeholder_usage_.
|
|
97
|
+
size_t reserved_usage_;
|
|
98
|
+
// Amount of memory reserved in the secondary cache. This should be
|
|
99
|
+
// reserved_usage_ * sec_cache_res_ratio_ in steady state.
|
|
100
|
+
size_t sec_reserved_;
|
|
92
101
|
};
|
|
93
102
|
|
|
94
103
|
} // namespace ROCKSDB_NAMESPACE
|
|
@@ -189,7 +189,7 @@ void CompressedCacheSetCapacityThread(void* v) {
|
|
|
189
189
|
s.ToString().c_str());
|
|
190
190
|
}
|
|
191
191
|
} else if (FLAGS_compressed_secondary_cache_ratio > 0.0) {
|
|
192
|
-
if (thread->rand.OneIn(2)) {
|
|
192
|
+
if (thread->rand.OneIn(2)) { // if (thread->rand.OneIn(2)) {
|
|
193
193
|
size_t capacity = block_cache->GetCapacity();
|
|
194
194
|
size_t adjustment;
|
|
195
195
|
if (FLAGS_use_write_buffer_manager && FLAGS_db_write_buffer_size > 0) {
|
|
@@ -13,7 +13,7 @@
|
|
|
13
13
|
// minor or major version number planned for release.
|
|
14
14
|
#define ROCKSDB_MAJOR 8
|
|
15
15
|
#define ROCKSDB_MINOR 8
|
|
16
|
-
#define ROCKSDB_PATCH
|
|
16
|
+
#define ROCKSDB_PATCH 1
|
|
17
17
|
|
|
18
18
|
// Do not use these. We made the mistake of declaring macros starting with
|
|
19
19
|
// double underscore. Now we have to live with our choice. We'll deprecate these
|
package/index.js
CHANGED
|
@@ -3,15 +3,14 @@
|
|
|
3
3
|
const { fromCallback } = require('catering')
|
|
4
4
|
const { AbstractLevel } = require('abstract-level')
|
|
5
5
|
const ModuleError = require('module-error')
|
|
6
|
-
const fs = require('fs')
|
|
7
6
|
const binding = require('./binding')
|
|
8
7
|
const { ChainedBatch } = require('./chained-batch')
|
|
9
8
|
const { Iterator } = require('./iterator')
|
|
10
|
-
const
|
|
9
|
+
const fs = require('node:fs')
|
|
10
|
+
const assert = require('node:assert')
|
|
11
11
|
|
|
12
12
|
const kContext = Symbol('context')
|
|
13
13
|
const kColumns = Symbol('columns')
|
|
14
|
-
const kLocation = Symbol('location')
|
|
15
14
|
const kPromise = Symbol('promise')
|
|
16
15
|
const kRef = Symbol('ref')
|
|
17
16
|
const kUnref = Symbol('unref')
|
|
@@ -21,34 +20,7 @@ const kPendingClose = Symbol('pendingClose')
|
|
|
21
20
|
const EMPTY = {}
|
|
22
21
|
|
|
23
22
|
class RocksLevel extends AbstractLevel {
|
|
24
|
-
constructor (
|
|
25
|
-
// To help migrating to abstract-level
|
|
26
|
-
if (typeof options === 'function' || typeof _ === 'function') {
|
|
27
|
-
throw new ModuleError('The levelup-style callback argument has been removed', {
|
|
28
|
-
code: 'LEVEL_LEGACY'
|
|
29
|
-
})
|
|
30
|
-
}
|
|
31
|
-
|
|
32
|
-
if (typeof location !== 'string' || location === '') {
|
|
33
|
-
throw new TypeError("The first argument 'location' must be a non-empty string")
|
|
34
|
-
}
|
|
35
|
-
|
|
36
|
-
options = {
|
|
37
|
-
...options, // TODO (fix): Other defaults...
|
|
38
|
-
parallelism: options?.parallelism ?? Math.max(1, os.cpus().length / 2),
|
|
39
|
-
createIfMissing: options?.createIfMissing ?? true,
|
|
40
|
-
errorIfExists: options?.errorIfExists ?? false,
|
|
41
|
-
walTTL: options?.walTTL ?? 0,
|
|
42
|
-
walSizeLimit: options?.walSizeLimit ?? 0,
|
|
43
|
-
walCompression: options?.walCompression ?? false,
|
|
44
|
-
unorderedWrite: options?.unorderedWrite ?? false,
|
|
45
|
-
manualWalFlush: options?.manualWalFlush ?? false,
|
|
46
|
-
walTotalSizeLimit: options?.walTotalSizeLimit ?? 0,
|
|
47
|
-
infoLogLevel: options?.infoLogLevel ?? ''
|
|
48
|
-
}
|
|
49
|
-
|
|
50
|
-
// TODO (fix): Check options.
|
|
51
|
-
|
|
23
|
+
constructor (locationOrHandle, options) {
|
|
52
24
|
super({
|
|
53
25
|
encodings: {
|
|
54
26
|
buffer: true,
|
|
@@ -61,45 +33,64 @@ class RocksLevel extends AbstractLevel {
|
|
|
61
33
|
}
|
|
62
34
|
}, options)
|
|
63
35
|
|
|
64
|
-
this[
|
|
65
|
-
this[kContext] = binding.db_init()
|
|
36
|
+
this[kContext] = binding.db_init(locationOrHandle)
|
|
66
37
|
this[kColumns] = {}
|
|
67
38
|
|
|
68
39
|
this[kRefs] = 0
|
|
69
40
|
this[kPendingClose] = null
|
|
70
|
-
|
|
71
|
-
// .updates(...) uses 'update' listener.
|
|
72
|
-
this.setMaxListeners(100)
|
|
73
41
|
}
|
|
74
42
|
|
|
75
43
|
get sequence () {
|
|
76
44
|
return binding.db_get_latest_sequence(this[kContext])
|
|
77
45
|
}
|
|
78
46
|
|
|
79
|
-
get location () {
|
|
80
|
-
return this[kLocation]
|
|
81
|
-
}
|
|
82
|
-
|
|
83
47
|
get columns () {
|
|
84
48
|
return this[kColumns]
|
|
85
49
|
}
|
|
86
50
|
|
|
51
|
+
get handle () {
|
|
52
|
+
// TODO (fix): Support returning handle even if not open yet...
|
|
53
|
+
assert(this.status === 'open', 'Database is not open')
|
|
54
|
+
|
|
55
|
+
return binding.db_get_handle(this[kContext])
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
get location () {
|
|
59
|
+
return binding.db_get_location(this[kContext])
|
|
60
|
+
}
|
|
61
|
+
|
|
87
62
|
_open (options, callback) {
|
|
88
|
-
const
|
|
89
|
-
|
|
63
|
+
const doOpen = () => {
|
|
64
|
+
let columns
|
|
65
|
+
try {
|
|
66
|
+
columns = binding.db_open(this[kContext], options, (err, columns) => {
|
|
67
|
+
if (err) {
|
|
68
|
+
callback(err)
|
|
69
|
+
} else {
|
|
70
|
+
this[kColumns] = columns
|
|
71
|
+
callback(null)
|
|
72
|
+
}
|
|
73
|
+
})
|
|
74
|
+
} catch (err) {
|
|
90
75
|
callback(err)
|
|
91
|
-
}
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
if (columns) {
|
|
92
79
|
this[kColumns] = columns
|
|
93
80
|
callback(null)
|
|
94
81
|
}
|
|
95
82
|
}
|
|
83
|
+
|
|
96
84
|
if (options.createIfMissing) {
|
|
97
|
-
fs.mkdir(this
|
|
98
|
-
if (err)
|
|
99
|
-
|
|
85
|
+
fs.mkdir(this.location, { recursive: true }, (err) => {
|
|
86
|
+
if (err) {
|
|
87
|
+
callback(err)
|
|
88
|
+
} else {
|
|
89
|
+
doOpen()
|
|
90
|
+
}
|
|
100
91
|
})
|
|
101
92
|
} else {
|
|
102
|
-
|
|
93
|
+
doOpen()
|
|
103
94
|
}
|
|
104
95
|
}
|
|
105
96
|
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@nxtedition/rocksdb",
|
|
3
|
-
"version": "
|
|
3
|
+
"version": "10.0.1",
|
|
4
4
|
"description": "A low-level Node.js RocksDB binding",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"main": "index.js",
|
|
@@ -14,8 +14,8 @@
|
|
|
14
14
|
"abstract-level": "^1.0.2",
|
|
15
15
|
"catering": "^2.1.1",
|
|
16
16
|
"module-error": "^1.0.2",
|
|
17
|
-
"napi-macros": "~2.
|
|
18
|
-
"node-gyp-build": "^4.
|
|
17
|
+
"napi-macros": "~2.2.2",
|
|
18
|
+
"node-gyp-build": "^4.8.0"
|
|
19
19
|
},
|
|
20
20
|
"devDependencies": {
|
|
21
21
|
"@types/node": "^18.11.3",
|
|
@@ -33,7 +33,7 @@
|
|
|
33
33
|
"readfiletree": "^1.0.0",
|
|
34
34
|
"rimraf": "^3.0.0",
|
|
35
35
|
"standard": "^17.0.0",
|
|
36
|
-
"tape": "^5.
|
|
36
|
+
"tape": "^5.7.5",
|
|
37
37
|
"tempy": "^1.0.1"
|
|
38
38
|
},
|
|
39
39
|
"standard": {
|
|
Binary file
|
package/util.h
CHANGED
|
@@ -45,6 +45,12 @@ static void Finalize(napi_env env, void* data, void* hint) {
|
|
|
45
45
|
}
|
|
46
46
|
}
|
|
47
47
|
|
|
48
|
+
static void FinalizeFree(napi_env env, void* data, void* hint) {
|
|
49
|
+
if (hint) {
|
|
50
|
+
free(hint);
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
|
|
48
54
|
static napi_value CreateError(napi_env env, const std::optional<std::string_view>& code, const std::string_view& msg) {
|
|
49
55
|
napi_value codeValue = nullptr;
|
|
50
56
|
if (code) {
|
|
@@ -352,4 +358,4 @@ napi_status runAsync(const std::string& name, napi_env env, napi_value callback,
|
|
|
352
358
|
worker.release();
|
|
353
359
|
|
|
354
360
|
return napi_ok;
|
|
355
|
-
}
|
|
361
|
+
}
|
|
Binary file
|