@nxtedition/rocksdb 7.0.19 → 7.0.22
This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
package/binding.cc
CHANGED
@@ -14,7 +14,6 @@
 #include <rocksdb/slice_transform.h>
 #include <rocksdb/table.h>
 #include <rocksdb/write_batch.h>
-#include <rocksdb/filter_policy.h>
 
 #include <array>
 #include <memory>
@@ -134,8 +133,7 @@ static std::optional<int64_t> Int64Property(napi_env env, napi_value obj, const
   if (HasProperty(env, obj, key.data())) {
     const auto value = GetProperty(env, obj, key.data());
     int64_t result;
-    bool lossless;
-    napi_get_value_bigint_int64(env, value, &result, &lossless);
+    napi_get_value_int64(env, value, &result);
     return result;
   }
 
@@ -539,9 +537,24 @@ struct Iterator final : public BaseIterator {
 };
 
 struct Updates {
-  Updates(Database* database,
-      …
-      …
+  Updates(Database* database,
+          int64_t seqNumber,
+          bool keys,
+          bool values,
+          bool data,
+          const std::optional<std::string>& column)
+      : database_(database), seqNumber_(seqNumber), keys_(keys), values_(values), data_(data) {
+    if (column) {
+      auto columns = database->columns_;
+      auto columnIt = std::find_if(columns.begin(), columns.end(),
+                                   [&](const auto& handle) { return handle->GetName() == *column; });
+      if (columnIt != columns.end()) {
+        column_family_id_ = (*columnIt)->GetID();
+      } else {
+        // TODO: Throw?
+      }
+    }
+  }
 
   void Close() { iterator_.reset(); }
 
@@ -560,6 +573,10 @@ struct Updates {
   Database* database_;
   int64_t seqNumber_;
   std::unique_ptr<rocksdb::TransactionLogIterator> iterator_;
+  bool keys_;
+  bool values_;
+  bool data_;
+  std::optional<uint32_t> column_family_id_;
 
  private:
   napi_ref ref_ = nullptr;
@@ -944,13 +961,15 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Wor
 
     updates_->seqNumber_ = batch.sequence;
 
-    …
+    count_ = batch.writeBatchPtr->Count();
+    cache_.reserve(batch.writeBatchPtr->Count() * 4);
 
     return batch.writeBatchPtr->Iterate(this);
   }
 
   napi_status OnOk(napi_env env, napi_value callback) override {
-    napi_value argv[…
+    napi_value argv[4];
+
     NAPI_STATUS_RETURN(napi_get_null(env, &argv[0]));
 
     if (cache_.empty()) {
@@ -958,23 +977,17 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Wor
     }
 
     NAPI_STATUS_RETURN(napi_create_array_with_length(env, cache_.size(), &argv[1]));
-    for (size_t idx = 0; idx < cache_.size(); idx += 3) {
-      napi_value op;
-      NAPI_STATUS_RETURN(Convert(env, cache_[idx + 0], false, op));
-      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx + 0), op));
-
-      napi_value key;
-      NAPI_STATUS_RETURN(Convert(env, cache_[idx + 1], false, key));
-      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx + 1), key));
-
+    for (size_t idx = 0; idx < cache_.size(); idx++) {
       napi_value val;
-      NAPI_STATUS_RETURN(Convert(env, cache_[idx + 2], false, val));
-      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx + 2), val));
+      NAPI_STATUS_RETURN(Convert(env, cache_[idx], false, val));
+      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], idx, val));
     }
 
-    NAPI_STATUS_RETURN(…
+    NAPI_STATUS_RETURN(napi_create_int64(env, updates_->seqNumber_, &argv[2]));
 
-    …
+    NAPI_STATUS_RETURN(napi_create_int64(env, count_, &argv[3]));
+
+    return CallFunction(env, callback, 4, argv);
   }
 
   void Destroy(napi_env env) override {
@@ -982,33 +995,82 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Wor
     Worker::Destroy(env);
   }
 
-  …
+  std::optional<std::string> GetColumnName(uint32_t column_family_id) {
+    if (column_family_id == 0) {
+      return "default";
+    }
+    auto columns = database_->columns_;
+    auto columnIt = std::find_if(columns.begin(), columns.end(),
+                                 [&](const auto& handle) { return handle->GetID() == column_family_id; });
+    return columnIt == columns.end() ? std::nullopt : std::optional<std::string>((*columnIt)->GetName());
+  }
+
+  rocksdb::Status PutCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
+    if (updates_->column_family_id_ && *updates_->column_family_id_ != column_family_id) {
+      return rocksdb::Status::OK();
+    }
     cache_.emplace_back("put");
-    …
-    …
+    if (updates_->keys_) {
+      cache_.emplace_back(key.ToStringView());
+    } else {
+      cache_.emplace_back(std::nullopt);
+    }
+    if (updates_->values_) {
+      cache_.emplace_back(value.ToStringView());
+    } else {
+      cache_.emplace_back(std::nullopt);
+    }
+    cache_.emplace_back(GetColumnName(column_family_id));
+    return rocksdb::Status::OK();
   }
 
-  …
+  rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
+    if (updates_->column_family_id_ && *updates_->column_family_id_ != column_family_id) {
+      return rocksdb::Status::OK();
+    }
     cache_.emplace_back("del");
-    …
+    if (updates_->keys_) {
+      cache_.emplace_back(key.ToStringView());
+    } else {
+      cache_.emplace_back(std::nullopt);
+    }
     cache_.emplace_back(std::nullopt);
+    cache_.emplace_back(GetColumnName(column_family_id));
+    return rocksdb::Status::OK();
   }
 
-  …
-  …
-  …
-  …
+  rocksdb::Status MergeCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
+    if (updates_->column_family_id_ && *updates_->column_family_id_ != column_family_id) {
+      return rocksdb::Status::OK();
+    }
+    cache_.emplace_back("put");
+    if (updates_->keys_) {
+      cache_.emplace_back(key.ToStringView());
+    } else {
+      cache_.emplace_back(std::nullopt);
+    }
+    if (updates_->values_) {
+      cache_.emplace_back(value.ToStringView());
+    } else {
+      cache_.emplace_back(std::nullopt);
+    }
+    cache_.emplace_back(GetColumnName(column_family_id));
+    return rocksdb::Status::OK();
   }
 
   void LogData(const rocksdb::Slice& data) override {
-    …
-    …
-    …
+    if (updates_->data_) {
+      cache_.emplace_back("data");
+      cache_.emplace_back(std::nullopt);
+      cache_.emplace_back(data.ToStringView());
+      cache_.emplace_back(std::nullopt);
+    }
   }
 
   bool Continue() override { return true; }
 
  private:
+  size_t count_;
   std::vector<std::optional<std::string>> cache_;
   Updates* updates_;
 };
@@ -1020,8 +1082,12 @@ NAPI_METHOD(updates_init) {
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
 
   const auto seqNumber = Int64Property(env, argv[1], "since").value_or(database->db_->GetLatestSequenceNumber());
+  const auto keys = BooleanProperty(env, argv[1], "keys").value_or(true);
+  const auto values = BooleanProperty(env, argv[1], "values").value_or(true);
+  const auto data = BooleanProperty(env, argv[1], "data").value_or(true);
+  const auto column = StringProperty(env, argv[1], "column");
 
-  auto updates = std::make_unique<Updates>(database, seqNumber);
+  auto updates = std::make_unique<Updates>(database, seqNumber, keys, values, data, column);
 
   napi_value result;
   NAPI_STATUS_THROWS(napi_create_external(env, updates.get(), Finalize<Updates>, updates.get(), &result));
@@ -1630,6 +1696,22 @@ NAPI_METHOD(batch_do) {
       NAPI_STATUS_THROWS(ToString(env, valueProperty, value));
 
       ROCKS_STATUS_THROWS(batch.Put(column, key, value));
+    } else if (type == "data") {
+      napi_value valueProperty;
+      NAPI_STATUS_THROWS(napi_get_named_property(env, element, "value", &valueProperty));
+      NAPI_STATUS_THROWS(ToString(env, valueProperty, value));
+
+      ROCKS_STATUS_THROWS(batch.PutLogData(value));
+    } else if (type == "merge") {
+      napi_value keyProperty;
+      NAPI_STATUS_THROWS(napi_get_named_property(env, element, "key", &keyProperty));
+      NAPI_STATUS_THROWS(ToString(env, keyProperty, key));
+
+      napi_value valueProperty;
+      NAPI_STATUS_THROWS(napi_get_named_property(env, element, "value", &valueProperty));
+      NAPI_STATUS_THROWS(ToString(env, valueProperty, value));
+
+      ROCKS_STATUS_THROWS(batch.Merge(column, key, value));
     } else {
       ROCKS_STATUS_THROWS(rocksdb::Status::InvalidArgument());
     }
@@ -1741,6 +1823,29 @@ NAPI_METHOD(batch_put_log_data) {
   return 0;
 }
 
+NAPI_METHOD(batch_merge) {
+  NAPI_ARGV(5);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  rocksdb::WriteBatch* batch;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], (void**)(&batch)));
+
+  std::string key;
+  NAPI_STATUS_THROWS(ToString(env, argv[2], key));
+
+  std::string val;
+  NAPI_STATUS_THROWS(ToString(env, argv[3], val));
+
+  rocksdb::ColumnFamilyHandle* column;
+  NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[4], &column));
+
+  ROCKS_STATUS_THROWS(batch->Merge(column, key, val));
+
+  return 0;
+}
+
 NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(db_init);
   NAPI_EXPORT_FUNCTION(db_open);
@@ -1770,4 +1875,5 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(batch_clear);
   NAPI_EXPORT_FUNCTION(batch_write);
   NAPI_EXPORT_FUNCTION(batch_put_log_data);
+  NAPI_EXPORT_FUNCTION(batch_merge);
 }
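A note on the row layout above: each operation collected by UpdatesNextWorker now occupies four consecutive slots in the rows array handed to the updates_next callback (type, key, value, column family name), with a null/undefined placeholder wherever the keys, values or data options suppressed a field. A minimal consumer-side sketch, assuming that flat layout; the helper name decodeUpdateRows is hypothetical and not part of the package:

  // Hypothetical helper: regroup the flat rows array from updates_next into
  // { type, key, value, column } records, four slots per operation.
  function decodeUpdateRows (rows) {
    const ops = []
    for (let idx = 0; idx < rows.length; idx += 4) {
      ops.push({
        type: rows[idx],        // 'put', 'del' or 'data'
        key: rows[idx + 1],     // placeholder when keys: false, or for 'data' entries
        value: rows[idx + 2],   // placeholder when values: false, or for 'del' entries
        column: rows[idx + 3]   // e.g. 'default'; placeholder for 'data' entries
      })
    }
    return ops
  }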
package/chained-batch.js
CHANGED
@@ -43,6 +43,11 @@ class ChainedBatch extends AbstractChainedBatch {
     // TODO (fix): Check if open...
     binding.batch_put_log_data(this[kDbContext], this[kBatchContext], data, options)
   }
+
+  merge (key, value, options) {
+    // TODO (fix): Check if open...
+    binding.batch_merge(this[kDbContext], this[kBatchContext], key, value, options)
+  }
 }
 
 exports.ChainedBatch = ChainedBatch
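A minimal usage sketch for the new chained-batch merge, assuming a database opened through this package, a RocksDB merge operator configured on the target column family, and that the column family is selected via the options argument (the exact option shape is not shown in this diff):

  // Sketch only: combine a put and a merge in one atomic batch write.
  const batch = db.batch()
  batch.put('visits', '1')
  batch.merge('visits', '1', { column: 'default' }) // 'column' is an assumed option name
  await batch.write()

As with RocksDB itself, what merge does to the stored value is defined entirely by the configured merge operator; without one the write is expected to fail.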
package/index.js
CHANGED
@@ -193,10 +193,7 @@ class RocksLevel extends AbstractLevel {
 
 class Updates {
   constructor (db, options) {
-    this.context = binding.updates_init(db[kContext], {
-      ...options,
-      since: BigInt(options.since || 0)
-    })
+    this.context = binding.updates_init(db[kContext], options)
     this.closed = false
     this.promise = null
     this.db = db
@@ -208,12 +205,12 @@ class RocksLevel extends AbstractLevel {
       return {}
     }
 
-    this.promise = new Promise(resolve => binding.updates_next(this.context, (err, rows, sequence, …
+    this.promise = new Promise(resolve => binding.updates_next(this.context, (err, rows, sequence, count) => {
      this.promise = null
      if (err) {
        resolve(Promise.reject(err))
      } else {
-        resolve({ rows, …
+        resolve({ rows, sequence, count })
      }
    }))
 
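A minimal sketch of driving the reworked Updates iterator, assuming the internal Updates class above is reached through the package's public API (not shown in this diff), that the method wrapping updates_next is named next, and that it resolves with the { rows, sequence, count } shape shown above:

  // Sketch only: follow the update log from sequence 0, keeping keys but
  // skipping values and log data, and limiting output to one column family.
  const updates = new Updates(db, {
    since: 0,          // plain number; the BigInt wrapping was dropped
    keys: true,
    values: false,
    data: false,
    column: 'default'
  })

  const { rows, sequence, count } = await updates.next()
  console.log('write batch of %d operations up to sequence %d', count, sequence, rows)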
package/package.json
CHANGED
Binary file