@nxtedition/rocksdb 7.0.18 → 7.0.21
package/binding.cc
CHANGED

@@ -14,7 +14,6 @@
 #include <rocksdb/slice_transform.h>
 #include <rocksdb/table.h>
 #include <rocksdb/write_batch.h>
-#include <rocksdb/filter_policy.h>
 
 #include <array>
 #include <memory>
@@ -539,12 +538,7 @@ struct Iterator final : public BaseIterator {
 };
 
 struct Updates {
-  Updates(Database* database,
-      : database_(database),
-        values_(values),
-        keyAsBuffer_(keyAsBuffer),
-        valueAsBuffer_(valueAsBuffer),
-        seqNumber_(seqNumber) {}
+  Updates(Database* database, int64_t seqNumber) : database_(database), seqNumber_(seqNumber) {}
 
   void Close() { iterator_.reset(); }
 
@@ -561,9 +555,6 @@ struct Updates {
   }
 
   Database* database_;
-  const bool values_;
-  const bool keyAsBuffer_;
-  const bool valueAsBuffer_;
   int64_t seqNumber_;
   std::unique_ptr<rocksdb::TransactionLogIterator> iterator_;
 
@@ -950,40 +941,49 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Wor
 
     updates_->seqNumber_ = batch.sequence;
 
-    cache_.reserve(batch.writeBatchPtr->Count() *
+    cache_.reserve(batch.writeBatchPtr->Count() * 4);
 
     return batch.writeBatchPtr->Iterate(this);
   }
 
   napi_status OnOk(napi_env env, napi_value callback) override {
-    napi_value argv[
+    napi_value argv[3];
     NAPI_STATUS_RETURN(napi_get_null(env, &argv[0]));
 
     if (cache_.empty()) {
       return CallFunction(env, callback, 1, argv);
     }
 
-    NAPI_STATUS_RETURN(napi_create_array_with_length(env, cache_.size(), &argv[1]));
-    for (size_t idx = 0; idx < cache_.size(); idx
+    NAPI_STATUS_RETURN(napi_create_array_with_length(env, cache_.size() * 4, &argv[1]));
+    for (size_t idx = 0; idx < cache_.size(); idx++) {
+      napi_value op;
+      NAPI_STATUS_RETURN(Convert(env, std::get<0>(cache_[idx]), false, op));
+      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx * 4 + 0), op));
+
       napi_value key;
-      NAPI_STATUS_RETURN(Convert(env, cache_[idx
-      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx +
+      NAPI_STATUS_RETURN(Convert(env, std::get<1>(cache_[idx]), false, key));
+      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx * 4 + 1), key));
 
       napi_value val;
-      NAPI_STATUS_RETURN(Convert(env, cache_[idx
-      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx +
+      NAPI_STATUS_RETURN(Convert(env, std::get<2>(cache_[idx]), false, val));
+      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx * 4 + 2), val));
+
+      auto column_family_id = std::get<3>(cache_[idx]);
+      auto columns = database_->columns_;
+      auto columnIt = std::find_if(columns.begin(), columns.end(),
+                                   [&](const auto& handle) { return handle->GetID() == column_family_id; });
+      napi_value column;
+      if (columnIt != columns.end()) {
+        NAPI_STATUS_RETURN(napi_create_external(env, *columnIt, nullptr, nullptr, &column));
+      } else {
+        NAPI_STATUS_RETURN(napi_get_null(env, &column));
+      }
+      NAPI_STATUS_RETURN(napi_set_element(env, argv[1], static_cast<int>(idx * 4 + 3), column));
     }
 
     NAPI_STATUS_RETURN(napi_create_bigint_int64(env, updates_->seqNumber_, &argv[2]));
 
-
-    for (size_t idx = 0; idx < logData_.size(); idx += 1) {
-      napi_value logData;
-      NAPI_STATUS_RETURN(Convert(env, logData_[idx], false, logData));
-      NAPI_STATUS_RETURN(napi_set_element(env, argv[3], static_cast<int>(idx), logData));
-    }
-
-    return CallFunction(env, callback, 4, argv);
+    return CallFunction(env, callback, 3, argv);
   }
 
   void Destroy(napi_env env) override {
@@ -991,29 +991,28 @@ struct UpdatesNextWorker final : public rocksdb::WriteBatch::Handler, public Wor
     Worker::Destroy(env);
   }
 
-
-      cache_.emplace_back(key.
-
-      cache_.emplace_back(value.ToString());
-    } else {
-      cache_.emplace_back(std::nullopt);
-    }
+  rocksdb::Status PutCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
+    cache_.emplace_back("put", key.ToStringView(), value.ToStringView(), column_family_id);
+    return rocksdb::Status::OK();
   }
 
-
-      cache_.emplace_back(key.
-
+  rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
+    cache_.emplace_back("del", key.ToStringView(), std::nullopt, column_family_id);
+    return rocksdb::Status::OK();
   }
 
-
-
+  rocksdb::Status MergeCF(uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value) override {
+    cache_.emplace_back("merge", key.ToStringView(), value.ToStringView(), column_family_id);
+    return rocksdb::Status::OK();
   }
 
+  void LogData(const rocksdb::Slice& data) override { cache_.emplace_back("data", std::nullopt, data.ToStringView()); }
+
   bool Continue() override { return true; }
 
  private:
-  std::vector<std::optional<std::string>>
-
+  std::vector<std::tuple<std::optional<std::string>, std::optional<std::string>, std::optional<std::string>, uint32_t>>
+      cache_;
   Updates* updates_;
 };
 
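Note: after these two hunks the worker no longer emits separate key, value and log-data arguments; every write-batch entry is flattened into a single rows array with four consecutive slots per entry (operation, key, value, column). A consumer-side sketch of that layout, with the surrounding names purely illustrative:

    // Hypothetical consumer of the new (err, rows, sequence) callback shape.
    binding.updates_next(context, (err, rows, sequence) => {
      if (err) throw err
      for (let i = 0; i < rows.length; i += 4) {
        const op = rows[i]         // 'put', 'del', 'merge' or 'data'
        const key = rows[i + 1]    // stored as std::nullopt on the native side for 'data' entries
        const value = rows[i + 2]  // stored as std::nullopt on the native side for 'del' entries
        const column = rows[i + 3] // column-family handle external, or null if the id is not found
        // ...handle the entry
      }
    })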
@@ -1023,12 +1022,9 @@ NAPI_METHOD(updates_init) {
   Database* database;
   NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
 
-  const auto values = BooleanProperty(env, argv[1], "values").value_or(true);
-  const bool keyAsBuffer = EncodingIsBuffer(env, argv[1], "keyEncoding");
-  const bool valueAsBuffer = EncodingIsBuffer(env, argv[1], "valueEncoding");
   const auto seqNumber = Int64Property(env, argv[1], "since").value_or(database->db_->GetLatestSequenceNumber());
 
-  auto updates = std::make_unique<Updates>(database,
+  auto updates = std::make_unique<Updates>(database, seqNumber);
 
   napi_value result;
   NAPI_STATUS_THROWS(napi_create_external(env, updates.get(), Finalize<Updates>, updates.get(), &result));
@@ -1637,6 +1633,22 @@ NAPI_METHOD(batch_do) {
       NAPI_STATUS_THROWS(ToString(env, valueProperty, value));
 
       ROCKS_STATUS_THROWS(batch.Put(column, key, value));
+    } else if (type == "data") {
+      napi_value valueProperty;
+      NAPI_STATUS_THROWS(napi_get_named_property(env, element, "value", &valueProperty));
+      NAPI_STATUS_THROWS(ToString(env, valueProperty, value));
+
+      ROCKS_STATUS_THROWS(batch.PutLogData(value));
+    } else if (type == "merge") {
+      napi_value keyProperty;
+      NAPI_STATUS_THROWS(napi_get_named_property(env, element, "key", &keyProperty));
+      NAPI_STATUS_THROWS(ToString(env, keyProperty, key));
+
+      napi_value valueProperty;
+      NAPI_STATUS_THROWS(napi_get_named_property(env, element, "value", &valueProperty));
+      NAPI_STATUS_THROWS(ToString(env, valueProperty, value));
+
+      ROCKS_STATUS_THROWS(batch.Merge(column, key, value));
     } else {
      ROCKS_STATUS_THROWS(rocksdb::Status::InvalidArgument());
    }
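Note: batch_do now accepts two additional operation types besides put: 'data' maps to WriteBatch::PutLogData and 'merge' maps to WriteBatch::Merge. An illustrative sketch of the operation objects, limited to the properties the hunk above actually reads (type, key, value):

    // Hypothetical elements passed to batch_do.
    const ops = [
      { type: 'merge', key: 'counter', value: '1' }, // -> batch.Merge(column, key, value)
      { type: 'data', value: 'replication-marker' }  // -> batch.PutLogData(value)
    ]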
@@ -1748,6 +1760,29 @@ NAPI_METHOD(batch_put_log_data) {
   return 0;
 }
 
+NAPI_METHOD(batch_merge) {
+  NAPI_ARGV(5);
+
+  Database* database;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], reinterpret_cast<void**>(&database)));
+
+  rocksdb::WriteBatch* batch;
+  NAPI_STATUS_THROWS(napi_get_value_external(env, argv[1], (void**)(&batch)));
+
+  std::string key;
+  NAPI_STATUS_THROWS(ToString(env, argv[2], key));
+
+  std::string val;
+  NAPI_STATUS_THROWS(ToString(env, argv[3], val));
+
+  rocksdb::ColumnFamilyHandle* column;
+  NAPI_STATUS_THROWS(GetColumnFamily(database, env, argv[4], &column));
+
+  ROCKS_STATUS_THROWS(batch->Merge(column, key, val));
+
+  return 0;
+}
+
 NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(db_init);
   NAPI_EXPORT_FUNCTION(db_open);
@@ -1777,4 +1812,5 @@ NAPI_INIT() {
   NAPI_EXPORT_FUNCTION(batch_clear);
   NAPI_EXPORT_FUNCTION(batch_write);
   NAPI_EXPORT_FUNCTION(batch_put_log_data);
+  NAPI_EXPORT_FUNCTION(batch_merge);
 }
package/chained-batch.js
CHANGED

@@ -43,6 +43,11 @@ class ChainedBatch extends AbstractChainedBatch {
     // TODO (fix): Check if open...
     binding.batch_put_log_data(this[kDbContext], this[kBatchContext], data, options)
   }
+
+  merge (key, value, options) {
+    // TODO (fix): Check if open...
+    binding.batch_merge(this[kDbContext], this[kBatchContext], key, value, options)
+  }
 }
 
 exports.ChainedBatch = ChainedBatch
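Note: the new merge (key, value, options) method forwards to the batch_merge binding added in binding.cc. A minimal usage sketch, assuming the target column family was opened with a RocksDB merge operator (RocksDB rejects merge entries without one):

    // Hypothetical usage of ChainedBatch#merge.
    const batch = db.batch()
    batch.merge('hits', '1')
    batch.put('updatedAt', Date.now().toString())
    await batch.write()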
package/index.js
CHANGED

@@ -208,12 +208,12 @@ class RocksLevel extends AbstractLevel {
       return {}
     }
 
-    this.promise = new Promise(resolve => binding.updates_next(this.context, (err, rows, sequence
+    this.promise = new Promise(resolve => binding.updates_next(this.context, (err, rows, sequence) => {
       this.promise = null
       if (err) {
         resolve(Promise.reject(err))
       } else {
-        resolve({ rows,
+        resolve({ rows, sequence: Number(sequence) })
       }
     }))
 
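Note: the native callback now reports the sequence number as a BigInt (napi_create_bigint_int64 in binding.cc), which index.js converts with Number() before resolving. A short sketch of the resolved shape; the consuming line itself is illustrative:

    const { rows, sequence } = await this.promise
    // rows is the flat [op, key, value, column, ...] array produced by updates_next;
    // sequence is a plain number (lossless only below Number.MAX_SAFE_INTEGER)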
package/package.json
CHANGED

Binary file