@nxtedition/rocksdb 7.0.28 → 7.0.31
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/binding.cc +190 -35
- package/index.js +60 -2
- package/package.json +1 -1
- package/prebuilds/darwin-arm64/node.napi.node +0 -0
- package/prebuilds/linux-x64/node.napi.node +0 -0
package/binding.cc
CHANGED
@@ -10,11 +10,11 @@
 #include <rocksdb/db.h>
 #include <rocksdb/env.h>
 #include <rocksdb/filter_policy.h>
+#include <rocksdb/merge_operator.h>
 #include <rocksdb/options.h>
 #include <rocksdb/slice_transform.h>
 #include <rocksdb/table.h>
 #include <rocksdb/write_batch.h>
-#include <rocksdb/merge_operator.h>
 
 #include <array>
 #include <memory>
@@ -543,19 +543,8 @@ struct Updates {
           bool keys,
           bool values,
           bool data,
-          const
-      : database_(database), seqNumber_(seqNumber), keys_(keys), values_(values), data_(data) {
-    if (column) {
-      auto columns = database->columns_;
-      auto columnIt = std::find_if(columns.begin(), columns.end(),
-                                   [&](const auto& handle) { return handle->GetName() == *column; });
-      if (columnIt != columns.end()) {
-        column_family_id_ = (*columnIt)->GetID();
-      } else {
-        // TODO: Throw?
-      }
-    }
-  }
+          const rocksdb::ColumnFamilyHandle* column)
+      : database_(database), seqNumber_(seqNumber), keys_(keys), values_(values), data_(data), column_(column) {}
 
   void Close() { iterator_.reset(); }
 
@@ -577,7 +566,7 @@ struct Updates {
   bool keys_;
   bool values_;
   bool data_;
-
+  const rocksdb::ColumnFamilyHandle* column_;
 
  private:
   napi_ref ref_ = nullptr;
@@ -586,7 +575,8 @@ struct Updates {
 static napi_status GetColumnFamily(Database* database,
                                    napi_env env,
                                    napi_value options,
-                                   rocksdb::ColumnFamilyHandle** column
+                                   rocksdb::ColumnFamilyHandle** column,
+                                   bool fallback = true) {
   bool hasColumn = false;
   NAPI_STATUS_RETURN(napi_has_named_property(env, options, "column", &hasColumn));
 
@@ -594,8 +584,10 @@ static napi_status GetColumnFamily(Database* database,
     napi_value value = nullptr;
     NAPI_STATUS_RETURN(napi_get_named_property(env, options, "column", &value));
     NAPI_STATUS_RETURN(napi_get_value_external(env, value, reinterpret_cast<void**>(column)));
-  } else {
+  } else if (fallback) {
     *column = database->db_->DefaultColumnFamily();
+  } else {
+    *column = nullptr;
   }
 
   return napi_ok;
@@ -822,8 +814,9 @@ NAPI_METHOD(db_open) {
 
   rocksdb::Options dbOptions;
 
-
-      .value_or(std::max<uint32_t>(1, std::thread::hardware_concurrency() / 2))
+  const auto parallelismValue = Uint32Property(env, argv[2], "parallelism")
+      .value_or(std::max<uint32_t>(1, std::thread::hardware_concurrency() / 2));
+  dbOptions.IncreaseParallelism(parallelismValue);
 
   dbOptions.create_if_missing = BooleanProperty(env, argv[2], "createIfMissing").value_or(true);
   dbOptions.error_if_exists = BooleanProperty(env, argv[2], "errorIfExists").value_or(false);
@@ -835,12 +828,53 @@ NAPI_METHOD(db_open) {
       .value_or(std::max<uint32_t>(2, std::thread::hardware_concurrency() / 8));
   dbOptions.WAL_ttl_seconds = Uint32Property(env, argv[2], "walTTL").value_or(0) / 1e3;
   dbOptions.WAL_size_limit_MB = Uint32Property(env, argv[2], "walSizeLimit").value_or(0) / 1e6;
-  dbOptions.create_missing_column_families = true;
-  dbOptions.unordered_write = BooleanProperty(env, argv[2], "unorderedWrite").value_or(false);
-  dbOptions.fail_if_options_file_error = true;
   dbOptions.wal_compression = BooleanProperty(env, argv[2], "walCompression").value_or(false)
       ? rocksdb::CompressionType::kZSTD
       : rocksdb::CompressionType::kNoCompression;
+  dbOptions.create_missing_column_families = true;
+  dbOptions.unordered_write = BooleanProperty(env, argv[2], "unorderedWrite").value_or(false);
+  dbOptions.fail_if_options_file_error = true;
+  dbOptions.manual_wal_flush = BooleanProperty(env, argv[2], "manualWalFlush").value_or(false);
+
+  napi_value ret;
+  NAPI_STATUS_THROWS(napi_create_object(env, &ret));
+  {
+    napi_value parallelism;
+    NAPI_STATUS_THROWS(napi_create_int64(env, parallelismValue, &parallelism));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "parallelism", parallelism));
+
+    napi_value createIfMissing;
+    NAPI_STATUS_THROWS(napi_get_boolean(env, dbOptions.create_if_missing, &createIfMissing));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "createIfMissing", createIfMissing));
+
+    napi_value errorIfExists;
+    NAPI_STATUS_THROWS(napi_get_boolean(env, dbOptions.error_if_exists, &errorIfExists));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "errorIfExists", errorIfExists));
+
+    napi_value maxBackgroundJobs;
+    NAPI_STATUS_THROWS(napi_create_int64(env, dbOptions.max_background_jobs, &maxBackgroundJobs));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "maxBackgroundJobs", maxBackgroundJobs));
+
+    napi_value walTTL;
+    NAPI_STATUS_THROWS(napi_create_int64(env, dbOptions.WAL_ttl_seconds * 1e3, &walTTL));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "walTTL", walTTL));
+
+    napi_value walSizeLimit;
+    NAPI_STATUS_THROWS(napi_create_int64(env, dbOptions.WAL_size_limit_MB * 1e6, &walSizeLimit));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "walSizeLimit", walSizeLimit));
+
+    napi_value walCompression;
+    NAPI_STATUS_THROWS(napi_create_int64(env, dbOptions.wal_compression, &walCompression));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "walCompression", walCompression));
+
+    napi_value unorderedWrite;
+    NAPI_STATUS_THROWS(napi_get_boolean(env, dbOptions.error_if_exists, &unorderedWrite));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "unorderedWrite", unorderedWrite));
+
+    napi_value manualWalFlush;
+    NAPI_STATUS_THROWS(napi_create_int64(env, dbOptions.manual_wal_flush, &manualWalFlush));
+    NAPI_STATUS_THROWS(napi_set_named_property(env, ret, "manualWalFlush", manualWalFlush));
+  }
 
   // TODO (feat): dbOptions.listeners
 
@@ -905,7 +939,7 @@ NAPI_METHOD(db_open) {
   auto worker = new OpenWorker(env, database, argv[3], location, dbOptions, columnsFamilies);
   worker->Queue(env);
 
-  return
+  return ret;
 }
 
 struct CloseWorker final : public Worker {
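The reworked db_open hunks above build a plain object (ret) that echoes the effective database options back to JavaScript and return it while the OpenWorker is queued. A rough sketch of the value the binding might hand back, with field names taken from the added code and purely illustrative values:

// Hypothetical shape of the value returned by binding.db_open();
// the numbers shown are illustrative, not guaranteed defaults.
const effectiveOptions = {
  parallelism: 4,        // parallelismValue passed to IncreaseParallelism
  createIfMissing: true,
  errorIfExists: false,
  maxBackgroundJobs: 2,
  walTTL: 0,             // milliseconds (native WAL_ttl_seconds * 1e3)
  walSizeLimit: 0,       // bytes (native WAL_size_limit_MB * 1e6)
  walCompression: 0,     // rocksdb CompressionType enum value
  unorderedWrite: false,
  manualWalFlush: 0
}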
@@ -1013,57 +1047,82 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Worker {
   }
 
   rocksdb::Status PutCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
-    if (updates_->
+    if (updates_->column_ && updates_->column_->GetID() != column_family_id) {
       return rocksdb::Status::OK();
     }
+
     cache_.emplace_back("put");
+
     if (updates_->keys_) {
       cache_.emplace_back(key.ToStringView());
     } else {
       cache_.emplace_back(std::nullopt);
     }
+
     if (updates_->values_) {
       cache_.emplace_back(value.ToStringView());
     } else {
       cache_.emplace_back(std::nullopt);
     }
-
+
+    if (!updates_->column_) {
+      cache_.emplace_back(GetColumnName(column_family_id));
+    } else {
+      cache_.emplace_back(std::nullopt);
+    }
+
     return rocksdb::Status::OK();
   }
 
   rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
-    if (updates_->
+    if (updates_->column_ && updates_->column_->GetID() != column_family_id) {
       return rocksdb::Status::OK();
     }
+
     cache_.emplace_back("del");
+
     if (updates_->keys_) {
       cache_.emplace_back(key.ToStringView());
     } else {
       cache_.emplace_back(std::nullopt);
     }
+
     cache_.emplace_back(std::nullopt);
-
+
+    if (!updates_->column_) {
+      cache_.emplace_back(GetColumnName(column_family_id));
+    } else {
+      cache_.emplace_back(std::nullopt);
+    }
+
     return rocksdb::Status::OK();
   }
 
   rocksdb::Status MergeCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
-    if (updates_->
+    if (updates_->column_ && updates_->column_->GetID() != column_family_id) {
       return rocksdb::Status::OK();
     }
+
     cache_.emplace_back("put");
+
     if (updates_->keys_) {
       cache_.emplace_back(key.ToStringView());
     } else {
       cache_.emplace_back(std::nullopt);
     }
+
     if (updates_->values_) {
       cache_.emplace_back(value.ToStringView());
     } else {
       cache_.emplace_back(std::nullopt);
     }
-
+
+    if (!updates_->column_) {
       cache_.emplace_back(GetColumnName(column_family_id));
+    } else {
+      cache_.emplace_back(std::nullopt);
     }
+
     return rocksdb::Status::OK();
   }
 
@@ -1090,13 +1149,30 @@ NAPI_METHOD(updates_init) {
   Database* database;
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
 
-
-
-
-
-
+  napi_value sinceProperty;
+  int64_t since;
+  NAPI_STATUS_THROWS(napi_get_named_property(env, argv[1], "since", &sinceProperty));
+  NAPI_STATUS_THROWS(napi_get_value_int64(env, sinceProperty, &since));
+
+  napi_value keysProperty;
+  bool keys;
+  NAPI_STATUS_THROWS(napi_get_named_property(env, argv[1], "keys", &keysProperty));
+  NAPI_STATUS_THROWS(napi_get_value_bool(env, keysProperty, &keys));
+
+  napi_value valuesProperty;
+  bool values;
+  NAPI_STATUS_THROWS(napi_get_named_property(env, argv[1], "values", &valuesProperty));
+  NAPI_STATUS_THROWS(napi_get_value_bool(env, valuesProperty, &values));
+
+  napi_value dataProperty;
+  bool data;
+  NAPI_STATUS_THROWS(napi_get_named_property(env, argv[1], "data", &dataProperty));
+  NAPI_STATUS_THROWS(napi_get_value_bool(env, dataProperty, &data));
 
-
+  rocksdb::ColumnFamilyHandle* column;
+  NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[1], &column, false));
+
+  auto updates = std::make_unique<Updates>(database, since, keys, values, data, column);
 
   napi_value result;
   NAPI_STATUS_THROWS(napi_create_external(env, updates.get(), Finalize<Updates>, updates.get(), &result));
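updates_init now reads its options explicitly and resolves an optional column family with the new fallback flag set to false, so the WriteBatch handler filters by column only when one was supplied. A rough sketch of the options object the JavaScript caller is expected to pass (field names from the added code; the column entry, when present, is a native column-family external):

// Illustrative options for binding.updates_init(context, options).
const updateOptions = {
  since: 0,      // starting sequence number
  keys: true,    // mirrored into Updates::keys_
  values: true,  // mirrored into Updates::values_
  data: true     // mirrored into Updates::data_
  // column: <external column-family handle>, optional
}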
@@ -1858,6 +1934,81 @@ NAPI_METHOD(batch_merge) {
   return 0;
 }
 
+NAPI_METHOD(db_flush_wal) {
+  NAPI_ARGV(2);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  const auto flush = BooleanProperty(env, argv[1], "flush").value_or(false);
+
+  ROCKS_STATUS_THROWS(database->db_->FlushWAL(flush));
+
+  return 0;
+}
+
+napi_status FromLogFile(napi_env env, const auto& file, napi_value* obj) {
+  NAPI_STATUS_RETURN(napi_create_object(env, obj));
+
+  napi_value pathName;
+  NAPI_STATUS_RETURN(napi_create_string_utf8(env, file->PathName().c_str(), NAPI_AUTO_LENGTH, &pathName));
+  NAPI_STATUS_RETURN(napi_set_named_property(env, *obj, "pathName", pathName));
+
+  napi_value logNumber;
+  NAPI_STATUS_RETURN(napi_create_int64(env, file->LogNumber(), &logNumber));
+  NAPI_STATUS_RETURN(napi_set_named_property(env, *obj, "logNumber", logNumber));
+
+  napi_value type;
+  NAPI_STATUS_RETURN(napi_create_int64(env, file->Type(), &type));
+  NAPI_STATUS_RETURN(napi_set_named_property(env, *obj, "type", type));
+
+  napi_value startSequence;
+  NAPI_STATUS_RETURN(napi_create_int64(env, file->StartSequence(), &startSequence));
+  NAPI_STATUS_RETURN(napi_set_named_property(env, *obj, "startSequence", startSequence));
+
+  napi_value sizeFileBytes;
+  NAPI_STATUS_RETURN(napi_create_int64(env, file->SizeFileBytes(), &sizeFileBytes));
+  NAPI_STATUS_RETURN(napi_set_named_property(env, *obj, "sizeFileBytes", sizeFileBytes));
+
+  return napi_ok;
+}
+
+NAPI_METHOD(db_get_sorted_wal_files) {
+  NAPI_ARGV(1);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  rocksdb::VectorLogPtr files;
+  ROCKS_STATUS_THROWS(database->db_->GetSortedWalFiles(files));
+
+  napi_value ret;
+  NAPI_STATUS_THROWS(napi_create_array_with_length(env, files.size(), &ret));
+
+  for (size_t n = 0; n < files.size(); ++n) {
+    napi_value obj;
+    NAPI_STATUS_THROWS(FromLogFile(env, files[n], &obj));
+    NAPI_STATUS_THROWS(napi_set_element(env, ret, n, obj));
+  }
+
+  return ret;
+}
+
+NAPI_METHOD(db_get_current_wal_file) {
+  NAPI_ARGV(1);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  std::unique_ptr<rocksdb::LogFile> file;
+  ROCKS_STATUS_THROWS(database->db_->GetCurrentWalFile(&file));
+
+  napi_value ret;
+  NAPI_STATUS_THROWS(FromLogFile(env, file, &ret));
+
+  return ret;
+}
+
 NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(db_init);
   NAPI_EXPORT_FUNCTION(db_open);
@@ -1880,6 +2031,10 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(updates_close);
   NAPI_EXPORT_FUNCTION(updates_next);
 
+  NAPI_EXPORT_FUNCTION(db_flush_wal);
+  NAPI_EXPORT_FUNCTION(db_get_sorted_wal_files);
+  NAPI_EXPORT_FUNCTION(db_get_current_wal_file);
+
   NAPI_EXPORT_FUNCTION(batch_do);
   NAPI_EXPORT_FUNCTION(batch_init);
   NAPI_EXPORT_FUNCTION(batch_put);
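FromLogFile converts a rocksdb::LogFile into a plain object, so each entry produced by db_get_sorted_wal_files and db_get_current_wal_file should look roughly like the sketch below (field names from the added code; values are illustrative):

// Illustrative WAL file descriptor as assembled by FromLogFile().
const walFile = {
  pathName: '/000123.log', // hypothetical file name
  logNumber: 123,
  type: 1,                 // rocksdb WalFileType enum value
  startSequence: 456789,
  sizeFileBytes: 1048576
}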
package/index.js
CHANGED
@@ -10,6 +10,7 @@ const { Iterator } = require('./iterator')
 const kContext = Symbol('context')
 const kColumns = Symbol('columns')
 const kLocation = Symbol('location')
+const kOptions = Symbol('options')
 
 class RocksLevel extends AbstractLevel {
   constructor (location, options, _) {
@@ -43,6 +44,10 @@ class RocksLevel extends AbstractLevel {
     this[kColumns] = {}
   }
 
+  get options () {
+    return this[kOptions]
+  }
+
   get sequence () {
     return Number(binding.db_get_latest_sequence(this[kContext]))
   }
@@ -67,10 +72,10 @@ class RocksLevel extends AbstractLevel {
     if (options.createIfMissing) {
       fs.mkdir(this[kLocation], { recursive: true }, (err) => {
         if (err) return callback(err)
-        binding.db_open(this[kContext], this[kLocation], options, onOpen)
+        this[kOptions] = binding.db_open(this[kContext], this[kLocation], options, onOpen)
       })
     } else {
-      binding.db_open(this[kContext], this[kLocation], options, onOpen)
+      this[kOptions] = binding.db_open(this[kContext], this[kLocation], options, onOpen)
    }
   }
 
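Since db_open now returns the effective options and open() stores them under kOptions, callers can read back what the database actually opened with through the new options getter. A minimal usage sketch, assuming a RocksLevel instance constructed with default options (illustrative only):

// Hypothetical usage of the new options getter after opening.
const db = new RocksLevel('./data')
await db.open()
console.log(db.options.parallelism, db.options.manualWalFlush)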
@@ -145,6 +150,36 @@ class RocksLevel extends AbstractLevel {
     return binding.db_get_property(this[kContext], property)
   }
 
+  async getCurrentWALFile () {
+    if (this.status !== 'open') {
+      throw new ModuleError('Database is not open', {
+        code: 'LEVEL_DATABASE_NOT_OPEN'
+      })
+    }
+
+    return binding.db_get_current_wal_file(this[kContext])
+  }
+
+  async getSortedWALFiles () {
+    if (this.status !== 'open') {
+      throw new ModuleError('Database is not open', {
+        code: 'LEVEL_DATABASE_NOT_OPEN'
+      })
+    }
+
+    return binding.db_get_sorted_wal_files(this[kContext])
+  }
+
+  async flushWAL (options) {
+    if (this.status !== 'open') {
+      throw new ModuleError('Database is not open', {
+        code: 'LEVEL_DATABASE_NOT_OPEN'
+      })
+    }
+
+    binding.db_flush_wal(this[kContext], options)
+  }
+
   async query (options) {
     if (this.status !== 'open') {
       throw new ModuleError('Database is not open', {
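These three wrappers expose the new WAL bindings on the level instance. A brief usage sketch against an already opened database named db; the { flush: true } flag is the 'flush' boolean that db_flush_wal forwards to RocksDB's FlushWAL (illustrative only):

// Hypothetical usage of the new WAL helpers on an open database.
await db.flushWAL({ flush: true })
const current = await db.getCurrentWALFile()
const files = await db.getSortedWALFiles()
console.log(current.pathName, files.length)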
@@ -191,6 +226,29 @@ class RocksLevel extends AbstractLevel {
       })
     }
 
+    options = {
+      since: options?.since ?? 0,
+      keys: options?.keys ?? true,
+      values: options?.values ?? true,
+      data: options?.data ?? true
+    }
+
+    if (typeof options.since !== 'number') {
+      throw new TypeError("'since' must be nully or a number")
+    }
+
+    if (typeof options.keys !== 'boolean') {
+      throw new TypeError("'keys' must be nully or a boolean")
+    }
+
+    if (typeof options.values !== 'boolean') {
+      throw new TypeError("'values' must be nully or a boolean")
+    }
+
+    if (typeof options.data !== 'boolean') {
+      throw new TypeError("'data' must be nully or a boolean")
+    }
+
     class Updates {
       constructor (db, options) {
         this.context = binding.updates_init(db[kContext], options)
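The added normalization gives the update options explicit defaults and type checks before they reach binding.updates_init through the Updates context above. A standalone sketch of the same defaulting rule, with a hypothetical helper name:

// Hypothetical helper mirroring the defaulting applied in the hunk above.
const withUpdateDefaults = (options) => ({
  since: options?.since ?? 0,
  keys: options?.keys ?? true,
  values: options?.values ?? true,
  data: options?.data ?? true
})

withUpdateDefaults({ keys: false }) // -> { since: 0, keys: false, values: true, data: true }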
package/package.json
CHANGED

package/prebuilds/darwin-arm64/node.napi.node
Binary file

package/prebuilds/linux-x64/node.napi.node
Binary file